From: Ben Noordhuis
Date: Tue, 11 Jun 2013 21:45:46 +0000 (+0200)
Subject: v8: upgrade to v3.19.13
X-Git-Tag: v0.11.3~48
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=6dd78074a3c0a7579ca5e919021587c22ff763ae;p=platform%2Fupstream%2Fnodejs.git

v8: upgrade to v3.19.13
---

diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 39885e7..6117e56 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,105 @@
+2013-06-11: Version 3.19.13
+
+        Performance and stability improvements on all platforms.
+
+
+2013-06-10: Version 3.19.12
+
+        Fixed arguments array access. (Chromium issue 247303)
+
+        Fixed bug in LookupForWrite. (Chromium issue 242332)
+
+        Performance and stability improvements on all platforms.
+
+
+2013-06-07: Version 3.19.11
+
+        Performance and stability improvements on all platforms.
+
+
+2013-06-06: Version 3.19.10
+
+        Performance and stability improvements on all platforms.
+
+
+2013-06-05: Version 3.19.9
+
+        Implemented Load IC support for loading properties from primitive
+        values to avoid perpetual soft deopts. (Chromium issue 242512)
+
+        Implemented Freeing of PerThreadAssertData when possible to avoid
+        memory leak. (Chromium issue 246567)
+
+        Removed V8_USE_OLD_STYLE_PERSISTENT_HANDLE_VISITORS.
+
+        Performance and stability improvements on all platforms.
+
+
+2013-06-03: Version 3.19.8
+
+        Fixed bug with inlining 'Array' function. (Chromium issue 244461)
+
+        Fixed initialization of literal objects. (Chromium issue 245424)
+
+        Fixed function name inferred inside closures. (Chromium issue 224884)
+
+        Performance and stability improvements on all platforms.
+
+
+2013-05-31: Version 3.19.7
+
+        Added support for //# sourceURL similar to deprecated //@ sourceURL one.
+        (issue 2702)
+
+        Made sure IfBuilder::Return clears the current block.
+        (Chromium issue 243868)
+
+        Fixed two CPU profiler tests on ARM and MIPS simulators
+        (issue 2628)
+
+        Fixed idle incremental GC for large objects.
+        (Chromium issue 241815)
+
+        Disabled --optimize-constructed-arrays due to crashes
+        (Chromium issue 244461)
+
+        Performance and stability improvements on all platforms.
+
+
+2013-05-28: Version 3.19.6
+
+        Fixed IfBuilder::Deopt to clear the current block
+        (Chromium issue 243868).
+
+        Performance and stability improvements on all platforms.
+
+
+2013-05-27: Version 3.19.5
+
+        Reset regexp parser flag after scanning ahead for capture groups.
+        (issue 2690)
+
+        Removed flakiness in test-cpu-profiler/SampleWhenFrameIsNotSetup.
+        (issue 2628)
+
+        Performance and stability improvements on all platforms.
+
+
+2013-05-24: Version 3.19.4
+
+        Fixed edge case in stack trace formatting. (Chromium issue 237617)
+
+        Fixed embedded new-space pointer in LCmpObjectEqAndBranch. (Chromium
+        issue 240032)
+
+        Made Object.freeze fast (issue 1858, Chromium issue 115960)
+
+        Fixed bogus deopt in BuildEmitDeepCopy for holey arrays. (Chromium issue
+        242924)
+
+        Performance and stability improvements on all platforms.
+
+
 2013-05-22: Version 3.19.3

        Performance and stability improvements on all platforms.
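For context, the //# sourceURL support added in 3.19.7 lets a script name itself for stack traces and the debugger. A minimal sketch of how that surfaces through the embedder API, assuming the 3.19-era C++ bindings and an already-entered context (the script body and URL below are illustrative, not from this commit):

    #include <v8.h>

    void RunAnnotatedScript(v8::Isolate* isolate) {
      v8::HandleScope scope(isolate);  // assumes a v8::Context is entered
      v8::Handle<v8::String> source = v8::String::New(
          "function fail() { throw new Error('boom'); }\n"
          "fail();\n"
          "//# sourceURL=generated/eval-frame.js\n");
      v8::TryCatch try_catch;
      v8::Script::Compile(source)->Run();
      // Frames for this script now report "generated/eval-frame.js" through
      // StackFrame::GetScriptNameOrSourceURL(), since the script name is empty.
    }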
diff --git a/deps/v8/Makefile.nacl b/deps/v8/Makefile.nacl index e8fc3d2..0c98021 100644 --- a/deps/v8/Makefile.nacl +++ b/deps/v8/Makefile.nacl @@ -46,7 +46,7 @@ else endif endif -TOOLCHAIN_PATH = ${NACL_SDK_ROOT}/toolchain +TOOLCHAIN_PATH = $(realpath ${NACL_SDK_ROOT}/toolchain) NACL_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/${TOOLCHAIN_DIR} ifeq ($(ARCH), nacl_ia32) diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi index ad6ccdf..127749a 100644 --- a/deps/v8/build/common.gypi +++ b/deps/v8/build/common.gypi @@ -129,22 +129,13 @@ 'defines': [ 'V8_TARGET_ARCH_ARM', ], - 'variables': { - 'armsimulator': '&1 | grep -q "^Target: arm" && echo "no" || echo "yes")', - }, - 'conditions': [ - [ 'v8_can_use_unaligned_accesses=="true"', { - 'defines': [ - 'CAN_USE_UNALIGNED_ACCESSES=1', - ], - }, { - 'defines': [ - 'CAN_USE_UNALIGNED_ACCESSES=0', - ], - }], - ['armsimulator=="no"', { - 'target_conditions': [ - ['_toolset=="target"', { + 'target_conditions': [ + ['_toolset=="host"', { + 'variables': { + 'armcompiler': '&1 | grep -q "^Target: arm" && echo "yes" || echo "no")', + }, + 'conditions': [ + ['armcompiler=="yes"', { 'conditions': [ [ 'armv7==1', { 'cflags': ['-march=armv7-a',], @@ -159,9 +150,9 @@ [ 'arm_fpu!="default"', { 'cflags': ['-mfpu=<(arm_fpu)',], }], - ] + ], }], - ] + ], }], [ 'arm_float_abi!="default"', { 'cflags': ['-mfloat-abi=<(arm_float_abi)',], @@ -172,63 +163,149 @@ [ 'arm_thumb==0', { 'cflags': ['-marm',], }], + [ 'arm_test=="on"', { + 'defines': [ + 'ARM_TEST', + ], + }], + ], + }, { + # armcompiler=="no" + 'conditions': [ + [ 'armv7==1 or armv7=="default"', { + 'defines': [ + 'CAN_USE_ARMV7_INSTRUCTIONS=1', + ], + 'conditions': [ + [ 'arm_fpu=="default"', { + 'defines': [ + 'CAN_USE_VFP3_INSTRUCTIONS', + ], + }], + [ 'arm_fpu=="vfpv3-d16"', { + 'defines': [ + 'CAN_USE_VFP3_INSTRUCTIONS', + ], + }], + [ 'arm_fpu=="vfpv3"', { + 'defines': [ + 'CAN_USE_VFP3_INSTRUCTIONS', + 'CAN_USE_VFP32DREGS', + ], + }], + [ 'arm_fpu=="neon" or arm_neon==1', { + 'defines': [ + 'CAN_USE_VFP3_INSTRUCTIONS', + 'CAN_USE_VFP32DREGS', + ], + }], + ], + }], + [ 'arm_float_abi=="hard"', { + 'defines': [ + 'USE_EABI_HARDFLOAT=1', + ], + }], + [ 'arm_float_abi=="softfp" or arm_float_abi=="default"', { + 'defines': [ + 'USE_EABI_HARDFLOAT=0', + ], + }], ], - }], - ], - 'conditions': [ - [ 'arm_test=="on"', { 'defines': [ 'ARM_TEST', ], }], ], - }], - ['armsimulator=="yes"', { - 'defines': [ - 'ARM_TEST', - ], + }], # _toolset=="host" + ['_toolset=="target"', { + 'variables': { + 'armcompiler': '&1 | grep -q "^Target: arm" && echo "yes" || echo "no")', + }, 'conditions': [ - [ 'armv7==1 or armv7=="default"', { - 'defines': [ - 'CAN_USE_ARMV7_INSTRUCTIONS=1', - ], + ['armcompiler=="yes"', { 'conditions': [ - [ 'arm_fpu=="default"', { + [ 'armv7==1', { + 'cflags': ['-march=armv7-a',], + }], + [ 'armv7==1 or armv7=="default"', { + 'conditions': [ + [ 'arm_neon==1', { + 'cflags': ['-mfpu=neon',], + }, + { + 'conditions': [ + [ 'arm_fpu!="default"', { + 'cflags': ['-mfpu=<(arm_fpu)',], + }], + ], + }], + ], + }], + [ 'arm_float_abi!="default"', { + 'cflags': ['-mfloat-abi=<(arm_float_abi)',], + }], + [ 'arm_thumb==1', { + 'cflags': ['-mthumb',], + }], + [ 'arm_thumb==0', { + 'cflags': ['-marm',], + }], + [ 'arm_test=="on"', { 'defines': [ - 'CAN_USE_VFP3_INSTRUCTIONS', + 'ARM_TEST', ], }], - [ 'arm_fpu=="vfpv3-d16"', { + ], + }, { + # armcompiler=="no" + 'conditions': [ + [ 'armv7==1 or armv7=="default"', { 'defines': [ - 'CAN_USE_VFP3_INSTRUCTIONS', + 'CAN_USE_ARMV7_INSTRUCTIONS=1', + ], + 
'conditions': [ + [ 'arm_fpu=="default"', { + 'defines': [ + 'CAN_USE_VFP3_INSTRUCTIONS', + ], + }], + [ 'arm_fpu=="vfpv3-d16"', { + 'defines': [ + 'CAN_USE_VFP3_INSTRUCTIONS', + ], + }], + [ 'arm_fpu=="vfpv3"', { + 'defines': [ + 'CAN_USE_VFP3_INSTRUCTIONS', + 'CAN_USE_VFP32DREGS', + ], + }], + [ 'arm_fpu=="neon" or arm_neon==1', { + 'defines': [ + 'CAN_USE_VFP3_INSTRUCTIONS', + 'CAN_USE_VFP32DREGS', + ], + }], ], }], - [ 'arm_fpu=="vfpv3"', { + [ 'arm_float_abi=="hard"', { 'defines': [ - 'CAN_USE_VFP3_INSTRUCTIONS', - 'CAN_USE_VFP32DREGS', + 'USE_EABI_HARDFLOAT=1', ], }], - [ 'arm_fpu=="neon" or arm_neon==1', { + [ 'arm_float_abi=="softfp" or arm_float_abi=="default"', { 'defines': [ - 'CAN_USE_VFP3_INSTRUCTIONS', - 'CAN_USE_VFP32DREGS', + 'USE_EABI_HARDFLOAT=0', ], }], ], - }], - [ 'arm_float_abi=="hard"', { - 'defines': [ - 'USE_EABI_HARDFLOAT=1', - ], - }], - [ 'arm_float_abi=="softfp" or arm_float_abi=="default"', { 'defines': [ - 'USE_EABI_HARDFLOAT=0', + 'ARM_TEST', ], }], - ] - }], + ], + }], # _toolset=="target" ], }], # v8_target_arch=="arm" ['v8_target_arch=="ia32"', { @@ -453,6 +530,15 @@ }], ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \ or OS=="android"', { + 'cflags!': [ + '-O2', + '-Os', + ], + 'cflags': [ + '-fdata-sections', + '-ffunction-sections', + '-O3', + ], 'conditions': [ [ 'gcc_version==44 and clang==0', { 'cflags': [ diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h index bc50b6f..d740df3 100644 --- a/deps/v8/include/v8-profiler.h +++ b/deps/v8/include/v8-profiler.h @@ -184,19 +184,21 @@ class V8EXPORT CpuProfiler { V8_DEPRECATED(static const CpuProfile* GetProfile( int index, Handle security_token = Handle())); - /** Returns a profile by index. */ - const CpuProfile* GetCpuProfile( + /** Deprecated. Use GetCpuProfile with single parameter. */ + V8_DEPRECATED(const CpuProfile* GetCpuProfile( int index, - Handle security_token = Handle()); + Handle security_token)); + /** Returns a profile by index. */ + const CpuProfile* GetCpuProfile(int index); /** Deprecated. Use FindProfile instead. */ V8_DEPRECATED(static const CpuProfile* FindProfile( unsigned uid, Handle security_token = Handle())); /** Returns a profile by uid. */ - const CpuProfile* FindCpuProfile( + V8_DEPRECATED(const CpuProfile* FindCpuProfile( unsigned uid, - Handle security_token = Handle()); + Handle security_token = Handle())); /** Deprecated. Use StartCpuProfiling instead. */ V8_DEPRECATED(static void StartProfiling(Handle title, @@ -219,12 +221,16 @@ class V8EXPORT CpuProfiler { Handle title, Handle security_token = Handle())); /** + * Deprecated. Use StopCpuProfiling with one parameter instead. + */ + V8_DEPRECATED(const CpuProfile* StopCpuProfiling( + Handle title, + Handle security_token)); + /** * Stops collecting CPU profile with a given title and returns it. * If the title given is empty, finishes the last profile started. */ - const CpuProfile* StopCpuProfiling( - Handle title, - Handle security_token = Handle()); + const CpuProfile* StopCpuProfiling(Handle title); /** Deprecated. Use DeleteAllCpuProfiles instead. */ V8_DEPRECATED(static void DeleteAllProfiles()); @@ -438,7 +444,7 @@ class V8EXPORT HeapProfiler { /** Deprecated. Use FindHeapSnapshot instead. */ V8_DEPRECATED(static const HeapSnapshot* FindSnapshot(unsigned uid)); /** Returns a profile by uid. */ - const HeapSnapshot* FindHeapSnapshot(unsigned uid); + V8_DEPRECATED(const HeapSnapshot* FindHeapSnapshot(unsigned uid)); /** Deprecated. Use GetObjectId instead. 
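The v8-profiler.h hunk above moves the security-token overloads behind V8_DEPRECATED and introduces token-free replacements. A hedged sketch of the migration an embedder would make, assuming the 3.19-era Isolate::GetCpuProfiler() accessor (the profile title is illustrative):

    v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
    v8::Handle<v8::String> title = v8::String::New("startup");
    profiler->StartCpuProfiling(title);
    // ... run the code being measured ...
    const v8::CpuProfile* profile = profiler->StopCpuProfiling(title);
    const v8::CpuProfile* same = profiler->GetCpuProfile(0);  // by index, no token

The Handle<String> security-token parameters are on their way out across CpuProfiler and HeapProfiler; only the single-argument forms remain undeprecated.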
*/ V8_DEPRECATED(static SnapshotObjectId GetSnapshotObjectId( diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index b3dff3f..c0bec79 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -214,11 +214,6 @@ class WeakReferenceCallbacks { P* parameter); }; -// TODO(svenpanne) Temporary definition until Chrome is in sync. -typedef void (*NearDeathCallback)(Isolate* isolate, - Persistent object, - void* parameter); - // --- Handles --- #define TYPE_CHECK(T, S) \ @@ -370,11 +365,11 @@ template class Handle { #endif private: - template - friend class Persistent; - template - friend class Local; + template friend class Persistent; + template friend class Local; friend class Arguments; + template friend class FunctionCallbackInfo; + template friend class PropertyCallbackInfo; friend class String; friend class Object; friend class AccessorInfo; @@ -385,6 +380,7 @@ template class Handle { friend class Context; friend class InternalHandleHelper; friend class LocalContext; + friend class HandleScope; #ifndef V8_USE_UNSAFE_HANDLES V8_INLINE(static Handle New(Isolate* isolate, T* that)); @@ -458,17 +454,18 @@ template class Local : public Handle { #endif private: - template - friend class Persistent; - template - friend class Handle; + template friend class Persistent; + template friend class Handle; friend class Arguments; + template friend class FunctionCallbackInfo; + template friend class PropertyCallbackInfo; friend class String; friend class Object; friend class AccessorInfo; friend class Context; friend class InternalHandleHelper; friend class LocalContext; + friend class HandleScope; V8_INLINE(static Local New(Isolate* isolate, T* that)); }; @@ -516,6 +513,10 @@ template class Persistent // NOLINT template V8_INLINE(Persistent(Isolate* isolate, Handle that)) : val_(*New(isolate, that)) { } + template V8_INLINE(Persistent(Isolate* isolate, + Persistent& that)) // NOLINT + : val_(*New(isolate, that)) { } + #else /** * Creates an empty persistent handle that doesn't point to any @@ -563,6 +564,7 @@ template class Persistent // NOLINT #endif +#ifdef V8_USE_UNSAFE_HANDLES template V8_INLINE(static Persistent Cast(Persistent that)) { #ifdef V8_ENABLE_CHECKS // If we're going to perform the type check then we have to check @@ -576,6 +578,22 @@ template class Persistent // NOLINT return Persistent::Cast(*this); } +#else + template + V8_INLINE(static Persistent& Cast(Persistent& that)) { // NOLINT +#ifdef V8_ENABLE_CHECKS + // If we're going to perform the type check then we have to check + // that the handle isn't empty before doing the checked cast. + if (!that.IsEmpty()) T::Cast(*that); +#endif + return reinterpret_cast&>(that); + } + + template V8_INLINE(Persistent& As()) { // NOLINT + return Persistent::Cast(*this); + } +#endif + V8_DEPRECATED(static Persistent New(Handle that)); /** @@ -615,38 +633,47 @@ template class Persistent // NOLINT * This handle's reference, and any other references to the storage * cell remain and IsEmpty will still return false. */ - // TODO(dcarney): remove before cutover - V8_INLINE(void Dispose(Isolate* isolate)); + // TODO(dcarney): deprecate + V8_INLINE(void Dispose(Isolate* isolate)) { Dispose(); } + /** + * Make the reference to this object weak. When only weak handles + * refer to the object, the garbage collector will perform a + * callback to the given V8::NearDeathCallback function, passing + * it the object reference and the given parameters. 
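The replacement overloads introduced here drop both the Isolate* argument and the NearDeathCallback variant. A sketch of a weak wrapper under the new signatures (the Wrapped type and function names are assumptions for illustration):

    struct Wrapped { int native_state; };

    // Matches WeakReferenceCallbacks<v8::Object, Wrapped>::Revivable.
    static void OnWeak(v8::Isolate* isolate,
                       v8::Persistent<v8::Object>* object,
                       Wrapped* data) {
      delete data;          // release the native state
      object->Dispose();    // no Isolate* argument required any more
    }

    void AttachWeak(v8::Persistent<v8::Object>& holder) {
      holder.MakeWeak(new Wrapped(), OnWeak);  // isolate-free overload below
    }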
+ */ template V8_INLINE(void MakeWeak( - Isolate* isolate, P* parameters, typename WeakReferenceCallbacks::Revivable callback)); template V8_INLINE(void MakeWeak( - Isolate* isolate, P* parameters, typename WeakReferenceCallbacks::Revivable callback)); - /** - * Make the reference to this object weak. When only weak handles - * refer to the object, the garbage collector will perform a - * callback to the given V8::NearDeathCallback function, passing - * it the object reference and the given parameters. - */ - // TODO(dcarney): remove before cutover - V8_INLINE(void MakeWeak(Isolate* isolate, - void* parameters, - NearDeathCallback callback)); + // TODO(dcarney): deprecate + template + V8_INLINE(void MakeWeak( + Isolate* isolate, + P* parameters, + typename WeakReferenceCallbacks::Revivable callback)) { + MakeWeak(parameters, callback); + } - V8_INLINE(void ClearWeak()); + // TODO(dcarney): deprecate + template + V8_INLINE(void MakeWeak( + Isolate* isolate, + P* parameters, + typename WeakReferenceCallbacks::Revivable callback)) { + MakeWeak
(parameters, callback); + } - // TODO(dcarney): remove before cutover - V8_INLINE(void ClearWeak(Isolate* isolate)); + V8_INLINE(void ClearWeak()); - V8_INLINE(void MarkIndependent()); + // TODO(dcarney): deprecate + V8_INLINE(void ClearWeak(Isolate* isolate)) { ClearWeak(); } /** * Marks the reference to this object independent. Garbage collector is free @@ -654,10 +681,10 @@ template class Persistent // NOLINT * independent handle should not assume that it will be preceded by a global * GC prologue callback or followed by a global GC epilogue callback. */ - // TODO(dcarney): remove before cutover - V8_INLINE(void MarkIndependent(Isolate* isolate)); + V8_INLINE(void MarkIndependent()); - V8_INLINE(void MarkPartiallyDependent()); + // TODO(dcarney): deprecate + V8_INLINE(void MarkIndependent(Isolate* isolate)) { MarkIndependent(); } /** * Marks the reference to this object partially dependent. Partially dependent @@ -667,49 +694,63 @@ template class Persistent // NOLINT * external dependencies. This mark is automatically cleared after each * garbage collection. */ - // TODO(dcarney): remove before cutover - V8_INLINE(void MarkPartiallyDependent(Isolate* isolate)); + V8_INLINE(void MarkPartiallyDependent()); - V8_INLINE(bool IsIndependent() const); + // TODO(dcarney): deprecate + V8_INLINE(void MarkPartiallyDependent(Isolate* isolate)) { + MarkPartiallyDependent(); + } - // TODO(dcarney): remove before cutover - V8_INLINE(bool IsIndependent(Isolate* isolate) const); + V8_INLINE(bool IsIndependent() const); - V8_INLINE(bool IsNearDeath() const); + // TODO(dcarney): deprecate + V8_INLINE(bool IsIndependent(Isolate* isolate) const) { + return IsIndependent(); + } /** Checks if the handle holds the only reference to an object. */ - // TODO(dcarney): remove before cutover - V8_INLINE(bool IsNearDeath(Isolate* isolate) const); + V8_INLINE(bool IsNearDeath() const); - V8_INLINE(bool IsWeak() const); + // TODO(dcarney): deprecate + V8_INLINE(bool IsNearDeath(Isolate* isolate) const) { return IsNearDeath(); } /** Returns true if the handle's reference is weak. */ - // TODO(dcarney): remove before cutover - V8_INLINE(bool IsWeak(Isolate* isolate) const); + V8_INLINE(bool IsWeak() const); - V8_INLINE(void SetWrapperClassId(uint16_t class_id)); + // TODO(dcarney): deprecate + V8_INLINE(bool IsWeak(Isolate* isolate) const) { return IsWeak(); } /** * Assigns a wrapper class ID to the handle. See RetainedObjectInfo interface * description in v8-profiler.h for details. */ - // TODO(dcarney): remove before cutover - V8_INLINE(void SetWrapperClassId(Isolate* isolate, uint16_t class_id)); + V8_INLINE(void SetWrapperClassId(uint16_t class_id)); - V8_INLINE(uint16_t WrapperClassId() const); + // TODO(dcarney): deprecate + V8_INLINE(void SetWrapperClassId(Isolate* isolate, uint16_t class_id)) { + SetWrapperClassId(class_id); + } /** * Returns the class ID previously assigned to this handle or 0 if no class ID * was previously assigned. */ - // TODO(dcarney): remove before cutover - V8_INLINE(uint16_t WrapperClassId(Isolate* isolate) const); + V8_INLINE(uint16_t WrapperClassId() const); + + // TODO(dcarney): deprecate + V8_INLINE(uint16_t WrapperClassId(Isolate* isolate) const) { + return WrapperClassId(); + } /** * Disposes the current contents of the handle and replaces it. */ V8_INLINE(void Reset(Isolate* isolate, const Handle& other)); +#ifndef V8_USE_UNSAFE_HANDLES + V8_INLINE(void Reset(Isolate* isolate, const Persistent& other)); +#endif + /** * Returns the underlying raw pointer and clears the handle. 
The caller is * responsible of eventually destroying the underlying object (by creating a @@ -722,10 +763,7 @@ template class Persistent // NOLINT #ifndef V8_USE_UNSAFE_HANDLES -#ifndef V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT - private: -#endif // TODO(dcarney): make unlinkable before cutover V8_INLINE(Persistent(const Persistent& that)) : val_(that.val_) {} // TODO(dcarney): make unlinkable before cutover @@ -748,21 +786,17 @@ template class Persistent // NOLINT } // TODO(dcarney): remove before cutover V8_INLINE(T* operator*() const) { return val_; } - public: -#ifndef V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW private: -#endif // TODO(dcarney): remove before cutover V8_INLINE(T* operator->() const) { return val_; } public: #endif private: - template - friend class Handle; - template - friend class Local; + template friend class Handle; + template friend class Local; + template friend class ReturnValue; friend class ImplementationUtilities; friend class ObjectTemplate; friend class Context; @@ -1202,7 +1236,8 @@ class V8EXPORT StackFrame { /** * Returns the name of the resource that contains the script for the * function for this StackFrame or sourceURL value if the script name - * is undefined and its source ends with //@ sourceURL=... string. + * is undefined and its source ends with //# sourceURL=... string or + * deprecated //@ sourceURL=... string. */ Local GetScriptNameOrSourceURL() const; @@ -1435,6 +1470,8 @@ class V8EXPORT Value : public Data { bool Equals(Handle that) const; bool StrictEquals(Handle that) const; + template V8_INLINE(static Value* Cast(T* value)); + private: V8_INLINE(bool QuickIsUndefined() const); V8_INLINE(bool QuickIsNull() const); @@ -1490,11 +1527,19 @@ class V8EXPORT String : public Primitive { V8_DEPRECATED(V8_INLINE(bool MayContainNonAscii()) const) { return true; } /** - * Returns whether this string contains only one byte data. + * Returns whether this string is known to contain only one byte data. + * Does not read the string. + * False negatives are possible. */ bool IsOneByte() const; /** + * Returns whether this string contain only one byte data. + * Will read the entire string in some cases. + */ + bool ContainsOnlyOneByte() const; + + /** * Write the contents of the string to an external buffer. * If no arguments are given, expects the buffer to be large * enough to hold the entire string and NULL terminator. Copies @@ -2325,6 +2370,9 @@ class V8EXPORT Function : public Object { static void CheckCast(Value* obj); }; +#ifndef V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT +#define V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT 2 +#endif /** * An instance of the built-in ArrayBuffer constructor (ES6 draft 15.13.5). @@ -2333,30 +2381,102 @@ class V8EXPORT Function : public Object { class V8EXPORT ArrayBuffer : public Object { public: /** - * Data length in bytes. + * Allocator that V8 uses to allocate |ArrayBuffer|'s memory. + * The allocator is a global V8 setting. It should be set with + * V8::SetArrayBufferAllocator prior to creation of a first ArrayBuffer. + * + * This API is experimental and may change significantly. */ - size_t ByteLength() const; + class V8EXPORT Allocator { // NOLINT + public: + virtual ~Allocator() {} + + /** + * Allocate |length| bytes. Return NULL if allocation is not successful. + */ + virtual void* Allocate(size_t length) = 0; + /** + * Free the memory pointed to |data|. That memory is guaranteed to be + * previously allocated by |Allocate|. 
+ */ + virtual void Free(void* data) = 0; + }; + /** - * Raw pointer to the array buffer data + * The contents of an |ArrayBuffer|. Externalization of |ArrayBuffer| + * returns an instance of this class, populated, with a pointer to data + * and byte length. + * + * The Data pointer of ArrayBuffer::Contents is always allocated with + * Allocator::Allocate that is set with V8::SetArrayBufferAllocator. + * + * This API is experimental and may change significantly. */ - void* Data() const; + class V8EXPORT Contents { // NOLINT + public: + Contents() : data_(NULL), byte_length_(0) {} + + void* Data() const { return data_; } + size_t ByteLength() const { return byte_length_; } + + private: + void* data_; + size_t byte_length_; + + friend class ArrayBuffer; + }; + + + /** + * Data length in bytes. + */ + size_t ByteLength() const; /** * Create a new ArrayBuffer. Allocate |byte_length| bytes. * Allocated memory will be owned by a created ArrayBuffer and - * will be deallocated when it is garbage-collected. + * will be deallocated when it is garbage-collected, + * unless the object is externalized. */ static Local New(size_t byte_length); /** * Create a new ArrayBuffer over an existing memory block. + * The created array buffer is immediately in externalized state. * The memory block will not be reclaimed when a created ArrayBuffer * is garbage-collected. */ static Local New(void* data, size_t byte_length); + /** + * Returns true if ArrayBuffer is extrenalized, that is, does not + * own its memory block. + */ + bool IsExternal() const; + + /** + * Neuters this ArrayBuffer and all its views (typed arrays). + * Neutering sets the byte length of the buffer and all typed arrays to zero, + * preventing JavaScript from ever accessing underlying backing store. + * ArrayBuffer should have been externalized. + */ + void Neuter(); + + /** + * Make this ArrayBuffer external. The pointer to underlying memory block + * and byte length are returned as |Contents| structure. After ArrayBuffer + * had been etxrenalized, it does no longer owns the memory block. The caller + * should take steps to free memory when it is no longer needed. + * + * The memory block is guaranteed to be allocated with |Allocator::Allocate| + * that has been set with V8::SetArrayBufferAllocator. 
+ */ + Contents Externalize(); + V8_INLINE(static ArrayBuffer* Cast(Value* obj)); + static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT; + private: ArrayBuffer(); static void CheckCast(Value* obj); @@ -2735,23 +2855,33 @@ class V8EXPORT Template : public Data { template -class V8EXPORT ReturnValue { +class ReturnValue { public: - V8_INLINE(explicit ReturnValue(internal::Object** slot)); + template V8_INLINE(ReturnValue(const ReturnValue& that)) + : value_(that.value_) { + TYPE_CHECK(T, S); + } // Handle setters - V8_INLINE(void Set(const Persistent& handle)); - V8_INLINE(void Set(const Handle handle)); + template V8_INLINE(void Set(const Persistent& handle)); + template V8_INLINE(void Set(const Handle handle)); // Fast primitive setters - V8_INLINE(void Set(Isolate* isolate, bool value)); - V8_INLINE(void Set(Isolate* isolate, double i)); - V8_INLINE(void Set(Isolate* isolate, int32_t i)); - V8_INLINE(void Set(Isolate* isolate, uint32_t i)); + V8_INLINE(void Set(bool value)); + V8_INLINE(void Set(double i)); + V8_INLINE(void Set(int32_t i)); + V8_INLINE(void Set(uint32_t i)); // Fast JS primitive setters - V8_INLINE(void SetNull(Isolate* isolate)); - V8_INLINE(void SetUndefined(Isolate* isolate)); + V8_INLINE(void SetNull()); + V8_INLINE(void SetUndefined()); + V8_INLINE(void SetEmptyString()); + // Convenience getter for Isolate + V8_INLINE(Isolate* GetIsolate()); + private: - V8_INLINE(void SetTrue(Isolate* isolate)); - V8_INLINE(void SetFalse(Isolate* isolate)); + template friend class ReturnValue; + template friend class FunctionCallbackInfo; + template friend class PropertyCallbackInfo; + V8_INLINE(internal::Object* GetDefaultValue()); + V8_INLINE(explicit ReturnValue(internal::Object** slot)); internal::Object** value_; }; @@ -2763,7 +2893,7 @@ class V8EXPORT ReturnValue { * the holder of the function. */ template -class V8EXPORT FunctionCallbackInfo { +class FunctionCallbackInfo { public: V8_INLINE(int Length() const); V8_INLINE(Local operator[](int i) const); @@ -2775,16 +2905,17 @@ class V8EXPORT FunctionCallbackInfo { V8_INLINE(Isolate* GetIsolate() const); V8_INLINE(ReturnValue GetReturnValue() const); // This shouldn't be public, but the arm compiler needs it. - static const int kArgsLength = 5; + static const int kArgsLength = 6; protected: friend class internal::FunctionCallbackArguments; friend class internal::CustomArguments; static const int kReturnValueIndex = 0; - static const int kIsolateIndex = -1; - static const int kDataIndex = -2; - static const int kCalleeIndex = -3; - static const int kHolderIndex = -4; + static const int kReturnValueDefaultValueIndex = -1; + static const int kIsolateIndex = -2; + static const int kDataIndex = -3; + static const int kCalleeIndex = -4; + static const int kHolderIndex = -5; V8_INLINE(FunctionCallbackInfo(internal::Object** implicit_args, internal::Object** values, @@ -2811,7 +2942,7 @@ class V8EXPORT Arguments : public FunctionCallbackInfo { * of the property access. */ template -class V8EXPORT PropertyCallbackInfo { +class PropertyCallbackInfo { public: V8_INLINE(Isolate* GetIsolate() const); V8_INLINE(Local Data() const); @@ -2819,7 +2950,7 @@ class V8EXPORT PropertyCallbackInfo { V8_INLINE(Local Holder() const); V8_INLINE(ReturnValue GetReturnValue() const); // This shouldn't be public, but the arm compiler needs it. 
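Taken together, the ArrayBuffer hunks replace the raw Data() accessor with an explicit ownership protocol: install an Allocator once, then Externalize() a buffer to take over its block. A minimal sketch assuming malloc-backed memory and an entered context (class and function names are illustrative):

    #include <stdlib.h>

    class MallocAllocator : public v8::ArrayBuffer::Allocator {
     public:
      virtual void* Allocate(size_t length) { return calloc(length, 1); }
      virtual void Free(void* data) { free(data); }
    };

    static MallocAllocator the_allocator;

    void TakeOverBuffer() {
      // Per the header comment: set once, before any ArrayBuffer is created.
      v8::V8::SetArrayBufferAllocator(&the_allocator);

      v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(1024);
      v8::ArrayBuffer::Contents contents = buffer->Externalize();
      // buffer->IsExternal() is now true; GC will not free contents.Data().
      buffer->Neuter();  // detaches all views; the buffer must be externalized
      the_allocator.Free(contents.Data());  // the caller owns the block now
    }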
- static const int kArgsLength = 5; + static const int kArgsLength = 6; protected: friend class MacroAssembler; @@ -2828,8 +2959,9 @@ class V8EXPORT PropertyCallbackInfo { static const int kThisIndex = 0; static const int kHolderIndex = -1; static const int kDataIndex = -2; - static const int kIsolateIndex = -3; - static const int kReturnValueIndex = -4; + static const int kReturnValueIndex = -3; + static const int kReturnValueDefaultValueIndex = -4; + static const int kIsolateIndex = -5; V8_INLINE(PropertyCallbackInfo(internal::Object** args)) : args_(args) { } @@ -4050,7 +4182,7 @@ class V8EXPORT ExternalResourceVisitor { // NOLINT class V8EXPORT PersistentHandleVisitor { // NOLINT public: virtual ~PersistentHandleVisitor() {} - virtual void VisitPersistentHandle(Persistent value, + virtual void VisitPersistentHandle(Persistent* value, uint16_t class_id) {} }; @@ -4062,13 +4194,13 @@ class V8EXPORT PersistentHandleVisitor { // NOLINT */ class V8EXPORT AssertNoGCScope { #ifndef DEBUG - V8_INLINE(AssertNoGCScope(Isolate* isolate)) {} + // TODO(yangguo): remove isolate argument. + V8_INLINE(AssertNoGCScope(Isolate* isolate)) { } #else AssertNoGCScope(Isolate* isolate); ~AssertNoGCScope(); private: - Isolate* isolate_; - bool last_state_; + void* disallow_heap_allocation_; #endif }; @@ -4089,6 +4221,14 @@ class V8EXPORT V8 { AllowCodeGenerationFromStringsCallback that); /** + * Set allocator to use for ArrayBuffer memory. + * The allocator should be set only once. The allocator should be set + * before any code tha uses ArrayBuffers is executed. + * This allocator is used in all isolates. + */ + static void SetArrayBufferAllocator(ArrayBuffer::Allocator* allocator); + + /** * Ignore out-of-memory exceptions. * * V8 running out of memory is treated as a fatal error by default. @@ -4277,41 +4417,6 @@ class V8EXPORT V8 { static void RemoveCallCompletedCallback(CallCompletedCallback callback); /** - * Allows the host application to group objects together. If one - * object in the group is alive, all objects in the group are alive. - * After each garbage collection, object groups are removed. It is - * intended to be used in the before-garbage-collection callback - * function, for instance to simulate DOM tree connections among JS - * wrapper objects. Object groups for all dependent handles need to - * be provided for kGCTypeMarkSweepCompact collections, for all other - * garbage collection types it is sufficient to provide object groups - * for partially dependent handles only. - * See v8-profiler.h for RetainedObjectInfo interface description. - */ - // TODO(marja): deprecate AddObjectGroup. Use Isolate::SetObjectGroupId and - // HeapProfiler::SetRetainedObjectInfo instead. - static void AddObjectGroup(Persistent* objects, - size_t length, - RetainedObjectInfo* info = NULL); - static void AddObjectGroup(Isolate* isolate, - Persistent* objects, - size_t length, - RetainedObjectInfo* info = NULL); - - /** - * Allows the host application to declare implicit references between - * the objects: if |parent| is alive, all |children| are alive too. - * After each garbage collection, all implicit references - * are removed. It is intended to be used in the before-garbage-collection - * callback function. - */ - // TODO(marja): Deprecate AddImplicitReferences. Use - // Isolate::SetReferenceFromGroup instead. - static void AddImplicitReferences(Persistent parent, - Persistent* children, - size_t length); - - /** * Initializes from snapshot if possible. Otherwise, attempts to * initialize from scratch. 
This function is called implicitly if * you use the API without calling it first. @@ -4541,16 +4646,12 @@ class V8EXPORT V8 { static internal::Object** GlobalizeReference(internal::Isolate* isolate, internal::Object** handle); - static void DisposeGlobal(internal::Isolate* isolate, - internal::Object** global_handle); + static void DisposeGlobal(internal::Object** global_handle); typedef WeakReferenceCallbacks::Revivable RevivableCallback; - static void MakeWeak(internal::Isolate* isolate, - internal::Object** global_handle, + static void MakeWeak(internal::Object** global_handle, void* data, - RevivableCallback weak_reference_callback, - NearDeathCallback near_death_callback); - static void ClearWeak(internal::Isolate* isolate, - internal::Object** global_handle); + RevivableCallback weak_reference_callback); + static void ClearWeak(internal::Object** global_handle); template friend class Handle; template friend class Local; @@ -4890,6 +4991,7 @@ class V8EXPORT Context { explicit V8_INLINE(Scope(Handle context)) : context_(context) { context_->Enter(); } + // TODO(dcarney): deprecate V8_INLINE(Scope(Isolate* isolate, Persistent& context)) // NOLINT #ifndef V8_USE_UNSAFE_HANDLES : context_(Handle::New(isolate, context)) { @@ -5231,7 +5333,7 @@ class Internals { static const int kNullValueRootIndex = 7; static const int kTrueValueRootIndex = 8; static const int kFalseValueRootIndex = 9; - static const int kEmptyStringRootIndex = 127; + static const int kEmptyStringRootIndex = 130; static const int kNodeClassIdOffset = 1 * kApiPointerSize; static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3; @@ -5241,10 +5343,10 @@ class Internals { static const int kNodeIsIndependentShift = 4; static const int kNodeIsPartiallyDependentShift = 5; - static const int kJSObjectType = 0xae; + static const int kJSObjectType = 0xaf; static const int kFirstNonstringType = 0x80; static const int kOddballType = 0x83; - static const int kForeignType = 0x86; + static const int kForeignType = 0x87; static const int kUndefinedOddballKind = 5; static const int kNullOddballKind = 3; @@ -5438,15 +5540,8 @@ Persistent Persistent::New(Isolate* isolate, T* that) { template bool Persistent::IsIndependent() const { - return IsIndependent(Isolate::GetCurrent()); -} - - -template -bool Persistent::IsIndependent(Isolate* isolate) const { typedef internal::Internals I; if (this->IsEmpty()) return false; - if (!I::IsInitialized(isolate)) return false; return I::GetNodeFlag(reinterpret_cast(this->val_), I::kNodeIsIndependentShift); } @@ -5454,15 +5549,8 @@ bool Persistent::IsIndependent(Isolate* isolate) const { template bool Persistent::IsNearDeath() const { - return IsNearDeath(Isolate::GetCurrent()); -} - - -template -bool Persistent::IsNearDeath(Isolate* isolate) const { typedef internal::Internals I; if (this->IsEmpty()) return false; - if (!I::IsInitialized(isolate)) return false; return I::GetNodeState(reinterpret_cast(this->val_)) == I::kNodeStateIsNearDeathValue; } @@ -5470,15 +5558,8 @@ bool Persistent::IsNearDeath(Isolate* isolate) const { template bool Persistent::IsWeak() const { - return IsWeak(Isolate::GetCurrent()); -} - - -template -bool Persistent::IsWeak(Isolate* isolate) const { typedef internal::Internals I; if (this->IsEmpty()) return false; - if (!I::IsInitialized(isolate)) return false; return I::GetNodeState(reinterpret_cast(this->val_)) == I::kNodeStateIsWeakValue; } @@ -5486,15 +5567,8 @@ bool Persistent::IsWeak(Isolate* isolate) const { template void Persistent::Dispose() { - 
Dispose(Isolate::GetCurrent()); -} - - -template -void Persistent::Dispose(Isolate* isolate) { if (this->IsEmpty()) return; - V8::DisposeGlobal(reinterpret_cast(isolate), - reinterpret_cast(this->val_)); + V8::DisposeGlobal(reinterpret_cast(this->val_)); #ifndef V8_USE_UNSAFE_HANDLES val_ = 0; #endif @@ -5504,86 +5578,50 @@ void Persistent::Dispose(Isolate* isolate) { template template void Persistent::MakeWeak( - Isolate* isolate, P* parameters, typename WeakReferenceCallbacks::Revivable callback) { TYPE_CHECK(S, T); typedef typename WeakReferenceCallbacks::Revivable Revivable; - V8::MakeWeak(reinterpret_cast(isolate), - reinterpret_cast(this->val_), + V8::MakeWeak(reinterpret_cast(this->val_), parameters, - reinterpret_cast(callback), - NULL); + reinterpret_cast(callback)); } template template void Persistent::MakeWeak( - Isolate* isolate, P* parameters, typename WeakReferenceCallbacks::Revivable callback) { - MakeWeak(isolate, parameters, callback); + MakeWeak(parameters, callback); } template -void Persistent::MakeWeak(Isolate* isolate, - void* parameters, - NearDeathCallback callback) { - V8::MakeWeak(reinterpret_cast(isolate), - reinterpret_cast(this->val_), - parameters, - NULL, - callback); -} - -template void Persistent::ClearWeak() { - ClearWeak(Isolate::GetCurrent()); + V8::ClearWeak(reinterpret_cast(this->val_)); } -template -void Persistent::ClearWeak(Isolate* isolate) { - V8::ClearWeak(reinterpret_cast(isolate), - reinterpret_cast(this->val_)); -} template void Persistent::MarkIndependent() { - MarkIndependent(Isolate::GetCurrent()); -} - -template -void Persistent::MarkIndependent(Isolate* isolate) { typedef internal::Internals I; if (this->IsEmpty()) return; - if (!I::IsInitialized(isolate)) return; I::UpdateNodeFlag(reinterpret_cast(this->val_), true, I::kNodeIsIndependentShift); } -template -void Persistent::MarkPartiallyDependent() { - MarkPartiallyDependent(Isolate::GetCurrent()); -} template -void Persistent::MarkPartiallyDependent(Isolate* isolate) { +void Persistent::MarkPartiallyDependent() { typedef internal::Internals I; if (this->IsEmpty()) return; - if (!I::IsInitialized(isolate)) return; I::UpdateNodeFlag(reinterpret_cast(this->val_), true, I::kNodeIsPartiallyDependentShift); } -template -void Persistent::SetWrapperClassId(uint16_t class_id) { - SetWrapperClassId(Isolate::GetCurrent(), class_id); -} - template void Persistent::Reset(Isolate* isolate, const Handle& other) { @@ -5602,6 +5640,21 @@ void Persistent::Reset(Isolate* isolate, const Handle& other) { } +#ifndef V8_USE_UNSAFE_HANDLES +template +void Persistent::Reset(Isolate* isolate, const Persistent& other) { + Dispose(isolate); + if (other.IsEmpty()) { + this->val_ = NULL; + return; + } + internal::Object** p = reinterpret_cast(other.val_); + this->val_ = reinterpret_cast( + V8::GlobalizeReference(reinterpret_cast(isolate), p)); +} +#endif + + template T* Persistent::ClearAndLeak() { T* old; @@ -5617,25 +5670,19 @@ T* Persistent::ClearAndLeak() { template -void Persistent::SetWrapperClassId(Isolate* isolate, uint16_t class_id) { +void Persistent::SetWrapperClassId(uint16_t class_id) { typedef internal::Internals I; if (this->IsEmpty()) return; - if (!I::IsInitialized(isolate)) return; internal::Object** obj = reinterpret_cast(this->val_); uint8_t* addr = reinterpret_cast(obj) + I::kNodeClassIdOffset; *reinterpret_cast(addr) = class_id; } -template -uint16_t Persistent::WrapperClassId() const { - return WrapperClassId(Isolate::GetCurrent()); -} template -uint16_t Persistent::WrapperClassId(Isolate* 
isolate) const { +uint16_t Persistent::WrapperClassId() const { typedef internal::Internals I; if (this->IsEmpty()) return 0; - if (!I::IsInitialized(isolate)) return 0; internal::Object** obj = reinterpret_cast(this->val_); uint8_t* addr = reinterpret_cast(obj) + I::kNodeClassIdOffset; return *reinterpret_cast(addr); @@ -5646,71 +5693,94 @@ template ReturnValue::ReturnValue(internal::Object** slot) : value_(slot) {} template -void ReturnValue::Set(const Persistent& handle) { - *value_ = *reinterpret_cast(*handle); +template +void ReturnValue::Set(const Persistent& handle) { + TYPE_CHECK(T, S); + if (V8_UNLIKELY(handle.IsEmpty())) { + *value_ = GetDefaultValue(); + } else { + *value_ = *reinterpret_cast(*handle); + } } template -void ReturnValue::Set(const Handle handle) { - *value_ = *reinterpret_cast(*handle); +template +void ReturnValue::Set(const Handle handle) { + TYPE_CHECK(T, S); + if (V8_UNLIKELY(handle.IsEmpty())) { + *value_ = GetDefaultValue(); + } else { + *value_ = *reinterpret_cast(*handle); + } } template -void ReturnValue::Set(Isolate* isolate, double i) { - Set(Number::New(isolate, i)); +void ReturnValue::Set(double i) { + Set(Number::New(GetIsolate(), i)); } template -void ReturnValue::Set(Isolate* isolate, int32_t i) { +void ReturnValue::Set(int32_t i) { typedef internal::Internals I; if (V8_LIKELY(I::IsValidSmi(i))) { *value_ = I::IntToSmi(i); return; } - Set(Integer::New(i, isolate)); + Set(Integer::New(i, GetIsolate())); } template -void ReturnValue::Set(Isolate* isolate, uint32_t i) { +void ReturnValue::Set(uint32_t i) { typedef internal::Internals I; - if (V8_LIKELY(I::IsValidSmi(i))) { - *value_ = I::IntToSmi(i); + // Can't simply use INT32_MAX here for whatever reason. + bool fits_into_int32_t = (i & (1 << 31)) == 0; + if (V8_LIKELY(fits_into_int32_t)) { + Set(static_cast(i)); return; } - Set(Integer::NewFromUnsigned(i, isolate)); + Set(Integer::NewFromUnsigned(i, GetIsolate())); } template -void ReturnValue::Set(Isolate* isolate, bool value) { +void ReturnValue::Set(bool value) { + typedef internal::Internals I; + int root_index; if (value) { - SetTrue(isolate); + root_index = I::kTrueValueRootIndex; } else { - SetFalse(isolate); + root_index = I::kFalseValueRootIndex; } + *value_ = *I::GetRoot(GetIsolate(), root_index); } template -void ReturnValue::SetTrue(Isolate* isolate) { +void ReturnValue::SetNull() { typedef internal::Internals I; - *value_ = *I::GetRoot(isolate, I::kTrueValueRootIndex); + *value_ = *I::GetRoot(GetIsolate(), I::kNullValueRootIndex); } template -void ReturnValue::SetFalse(Isolate* isolate) { +void ReturnValue::SetUndefined() { typedef internal::Internals I; - *value_ = *I::GetRoot(isolate, I::kFalseValueRootIndex); + *value_ = *I::GetRoot(GetIsolate(), I::kUndefinedValueRootIndex); } template -void ReturnValue::SetNull(Isolate* isolate) { +void ReturnValue::SetEmptyString() { typedef internal::Internals I; - *value_ = *I::GetRoot(isolate, I::kNullValueRootIndex); + *value_ = *I::GetRoot(GetIsolate(), I::kEmptyStringRootIndex); } template -void ReturnValue::SetUndefined(Isolate* isolate) { - typedef internal::Internals I; - *value_ = *I::GetRoot(isolate, I::kUndefinedValueRootIndex); +Isolate* ReturnValue::GetIsolate() { + // Isolate is always the pointer below the default value on the stack. + return *reinterpret_cast(&value_[-2]); +} + +template +internal::Object* ReturnValue::GetDefaultValue() { + // Default value is always the pointer below value_ on the stack. 
+ return value_[-1]; } @@ -5989,6 +6059,11 @@ bool Value::QuickIsString() const { } +template Value* Value::Cast(T* value) { + return static_cast(value); +} + + Symbol* Symbol::Cast(v8::Value* value) { #ifdef V8_ENABLE_CHECKS CheckCast(value); @@ -6157,6 +6232,14 @@ Float64Array* Float64Array::Cast(v8::Value* value) { } +Uint8ClampedArray* Uint8ClampedArray::Cast(v8::Value* value) { +#ifdef V8_ENABLE_CHECKS + CheckCast(value); +#endif + return static_cast(value); +} + + Function* Function::Cast(v8::Value* value) { #ifdef V8_ENABLE_CHECKS CheckCast(value); diff --git a/deps/v8/samples/lineprocessor.cc b/deps/v8/samples/lineprocessor.cc index 2ce31b4..214af05 100644 --- a/deps/v8/samples/lineprocessor.cc +++ b/deps/v8/samples/lineprocessor.cc @@ -25,10 +25,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// TODO(dcarney): remove -#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR -#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT - #include #ifdef ENABLE_DEBUGGER_SUPPORT @@ -106,8 +102,8 @@ void ReportException(v8::Isolate* isolate, v8::TryCatch* handler); v8::Handle ReadFile(const char* name); v8::Handle ReadLine(); -v8::Handle Print(const v8::Arguments& args); -v8::Handle ReadLine(const v8::Arguments& args); +void Print(const v8::FunctionCallbackInfo& args); +void ReadLine(const v8::FunctionCallbackInfo& args); bool RunCppCycle(v8::Handle script, v8::Local context, bool report_exceptions); @@ -130,7 +126,9 @@ void DispatchDebugMessages() { // think about. v8::Isolate* isolate = v8::Isolate::GetCurrent(); v8::HandleScope handle_scope(isolate); - v8::Context::Scope scope(isolate, debug_message_context); + v8::Local context = + v8::Local::New(isolate, debug_message_context); + v8::Context::Scope scope(context); v8::Debug::ProcessDebugMessages(); } @@ -220,8 +218,7 @@ int RunMain(int argc, char* argv[]) { v8::Context::Scope context_scope(context); #ifdef ENABLE_DEBUGGER_SUPPORT - debug_message_context = - v8::Persistent::New(isolate, context); + debug_message_context.Reset(isolate, context); v8::Locker locker(isolate); @@ -396,7 +393,7 @@ void ReportException(v8::Isolate* isolate, v8::TryCatch* try_catch) { // The callback that is invoked by v8 whenever the JavaScript 'print' // function is called. Prints its arguments on stdout separated by // spaces and ending with a newline. -v8::Handle Print(const v8::Arguments& args) { +void Print(const v8::FunctionCallbackInfo& args) { bool first = true; for (int i = 0; i < args.Length(); i++) { v8::HandleScope handle_scope(args.GetIsolate()); @@ -411,17 +408,17 @@ v8::Handle Print(const v8::Arguments& args) { } printf("\n"); fflush(stdout); - return v8::Undefined(); } // The callback that is invoked by v8 whenever the JavaScript 'read_line' // function is called. Reads a string from standard input and returns. 
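The sample conversions that follow all apply one mechanical pattern: callbacks now return void, errors are thrown and then returned from, and results travel through GetReturnValue(). Condensed into a single sketch (the function name and messages are illustrative):

    void Sum(const v8::FunctionCallbackInfo<v8::Value>& args) {
      if (args.Length() != 2) {
        // Old style returned ThrowException(...); now we throw, then return.
        v8::ThrowException(v8::String::New("Sum expects two arguments"));
        return;  // the return value stays at its default, undefined
      }
      args.GetReturnValue().Set(args[0]->NumberValue() + args[1]->NumberValue());
    }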
-v8::Handle ReadLine(const v8::Arguments& args) { +void ReadLine(const v8::FunctionCallbackInfo& args) { if (args.Length() > 0) { - return v8::ThrowException(v8::String::New("Unexpected arguments")); + v8::ThrowException(v8::String::New("Unexpected arguments")); + return; } - return ReadLine(); + args.GetReturnValue().Set(ReadLine()); } v8::Handle ReadLine() { @@ -437,7 +434,7 @@ v8::Handle ReadLine() { } if (res == NULL) { v8::Handle t = v8::Undefined(); - return v8::Handle(v8::String::Cast(*t)); + return v8::Handle::Cast(t); } // Remove newline char for (char* pos = buffer; *pos != '\0'; pos++) { diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc index fd3a821..97eec14 100644 --- a/deps/v8/samples/process.cc +++ b/deps/v8/samples/process.cc @@ -25,11 +25,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// TODO(dcarney): remove this -#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR -#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT -#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW - #include #include @@ -107,18 +102,21 @@ class JsHttpRequestProcessor : public HttpRequestProcessor { static Handle MakeMapTemplate(Isolate* isolate); // Callbacks that access the individual fields of request objects. - static Handle GetPath(Local name, const AccessorInfo& info); - static Handle GetReferrer(Local name, - const AccessorInfo& info); - static Handle GetHost(Local name, const AccessorInfo& info); - static Handle GetUserAgent(Local name, - const AccessorInfo& info); + static void GetPath(Local name, + const PropertyCallbackInfo& info); + static void GetReferrer(Local name, + const PropertyCallbackInfo& info); + static void GetHost(Local name, + const PropertyCallbackInfo& info); + static void GetUserAgent(Local name, + const PropertyCallbackInfo& info); // Callbacks that access maps - static Handle MapGet(Local name, const AccessorInfo& info); - static Handle MapSet(Local name, - Local value, - const AccessorInfo& info); + static void MapGet(Local name, + const PropertyCallbackInfo& info); + static void MapSet(Local name, + Local value, + const PropertyCallbackInfo& info); // Utility methods for wrapping C++ objects as JavaScript objects, // and going back again. @@ -142,13 +140,12 @@ class JsHttpRequestProcessor : public HttpRequestProcessor { // ------------------------- -static Handle LogCallback(const Arguments& args) { - if (args.Length() < 1) return Undefined(); +static void LogCallback(const v8::FunctionCallbackInfo& args) { + if (args.Length() < 1) return; HandleScope scope(args.GetIsolate()); Handle arg = args[0]; String::Utf8Value value(arg); HttpRequestProcessor::Log(*value); - return Undefined(); } @@ -168,11 +165,12 @@ bool JsHttpRequestProcessor::Initialize(map* opts, // is what we need for the reference to remain after we return from // this method. That persistent handle has to be disposed in the // destructor. - context_.Reset(GetIsolate(), Context::New(GetIsolate(), NULL, global)); + v8::Handle context = Context::New(GetIsolate(), NULL, global); + context_.Reset(GetIsolate(), context); // Enter the new context so all the following operations take place // within it. - Context::Scope context_scope(GetIsolate(), context_); + Context::Scope context_scope(context); // Make the options mapping available within the context if (!InstallMaps(opts, output)) @@ -185,7 +183,7 @@ bool JsHttpRequestProcessor::Initialize(map* opts, // The script compiled and ran correctly. 
Now we fetch out the // Process function from the global object. Handle process_name = String::New("Process"); - Handle process_val = context_->Global()->Get(process_name); + Handle process_val = context->Global()->Get(process_name); // If there is no Process function, or if it is not a function, // bail out @@ -196,7 +194,7 @@ bool JsHttpRequestProcessor::Initialize(map* opts, // Store the function in a Persistent handle, since we also want // that to remain after this call returns - process_ = Persistent::New(GetIsolate(), process_fun); + process_.Reset(GetIsolate(), process_fun); // All done; all went well return true; @@ -239,11 +237,14 @@ bool JsHttpRequestProcessor::InstallMaps(map* opts, // Wrap the map object in a JavaScript wrapper Handle opts_obj = WrapMap(opts); + v8::Local context = + v8::Local::New(GetIsolate(), context_); + // Set the options object as a property on the global object. - context_->Global()->Set(String::New("options"), opts_obj); + context->Global()->Set(String::New("options"), opts_obj); Handle output_obj = WrapMap(output); - context_->Global()->Set(String::New("output"), output_obj); + context->Global()->Set(String::New("output"), output_obj); return true; } @@ -253,9 +254,12 @@ bool JsHttpRequestProcessor::Process(HttpRequest* request) { // Create a handle scope to keep the temporary object references. HandleScope handle_scope(GetIsolate()); + v8::Local context = + v8::Local::New(GetIsolate(), context_); + // Enter this processor's context so all the remaining operations // take place there - Context::Scope context_scope(GetIsolate(), context_); + Context::Scope context_scope(context); // Wrap the C++ request object in a JavaScript wrapper Handle request_obj = WrapRequest(request); @@ -267,7 +271,9 @@ bool JsHttpRequestProcessor::Process(HttpRequest* request) { // and one argument, the request. const int argc = 1; Handle argv[argc] = { request_obj }; - Handle result = process_->Call(context_->Global(), argc, argv); + v8::Local process = + v8::Local::New(GetIsolate(), process_); + Handle result = process->Call(context->Global(), argc, argv); if (result.IsEmpty()) { String::Utf8Value error(try_catch.Exception()); Log(*error); @@ -306,7 +312,7 @@ Handle JsHttpRequestProcessor::WrapMap(map* obj) { // It only has to be created once, which we do on demand. if (map_template_.IsEmpty()) { Handle raw_template = MakeMapTemplate(GetIsolate()); - map_template_ = Persistent::New(GetIsolate(), raw_template); + map_template_.Reset(GetIsolate(), raw_template); } Handle templ = Local::New(GetIsolate(), map_template_); @@ -346,8 +352,8 @@ string ObjectToString(Local value) { } -Handle JsHttpRequestProcessor::MapGet(Local name, - const AccessorInfo& info) { +void JsHttpRequestProcessor::MapGet(Local name, + const PropertyCallbackInfo& info) { // Fetch the map wrapped by this object. 
map* obj = UnwrapMap(info.Holder()); @@ -358,17 +364,18 @@ Handle JsHttpRequestProcessor::MapGet(Local name, map::iterator iter = obj->find(key); // If the key is not present return an empty handle as signal - if (iter == obj->end()) return Handle(); + if (iter == obj->end()) return; // Otherwise fetch the value and wrap it in a JavaScript string const string& value = (*iter).second; - return String::New(value.c_str(), static_cast(value.length())); + info.GetReturnValue().Set( + String::New(value.c_str(), static_cast(value.length()))); } -Handle JsHttpRequestProcessor::MapSet(Local name, - Local value_obj, - const AccessorInfo& info) { +void JsHttpRequestProcessor::MapSet(Local name, + Local value_obj, + const PropertyCallbackInfo& info) { // Fetch the map wrapped by this object. map* obj = UnwrapMap(info.Holder()); @@ -380,7 +387,7 @@ Handle JsHttpRequestProcessor::MapSet(Local name, (*obj)[key] = value; // Return the value; any non-empty handle will work. - return value_obj; + info.GetReturnValue().Set(value_obj); } @@ -413,8 +420,7 @@ Handle JsHttpRequestProcessor::WrapRequest(HttpRequest* request) { // It only has to be created once, which we do on demand. if (request_template_.IsEmpty()) { Handle raw_template = MakeRequestTemplate(GetIsolate()); - request_template_ = - Persistent::New(GetIsolate(), raw_template); + request_template_.Reset(GetIsolate(), raw_template); } Handle templ = Local::New(GetIsolate(), request_template_); @@ -448,8 +454,8 @@ HttpRequest* JsHttpRequestProcessor::UnwrapRequest(Handle obj) { } -Handle JsHttpRequestProcessor::GetPath(Local name, - const AccessorInfo& info) { +void JsHttpRequestProcessor::GetPath(Local name, + const PropertyCallbackInfo& info) { // Extract the C++ request object from the JavaScript wrapper. HttpRequest* request = UnwrapRequest(info.Holder()); @@ -457,31 +463,37 @@ Handle JsHttpRequestProcessor::GetPath(Local name, const string& path = request->Path(); // Wrap the result in a JavaScript string and return it. 
- return String::New(path.c_str(), static_cast(path.length())); + info.GetReturnValue().Set( + String::New(path.c_str(), static_cast(path.length()))); } -Handle JsHttpRequestProcessor::GetReferrer(Local name, - const AccessorInfo& info) { +void JsHttpRequestProcessor::GetReferrer( + Local name, + const PropertyCallbackInfo& info) { HttpRequest* request = UnwrapRequest(info.Holder()); const string& path = request->Referrer(); - return String::New(path.c_str(), static_cast(path.length())); + info.GetReturnValue().Set( + String::New(path.c_str(), static_cast(path.length()))); } -Handle JsHttpRequestProcessor::GetHost(Local name, - const AccessorInfo& info) { +void JsHttpRequestProcessor::GetHost(Local name, + const PropertyCallbackInfo& info) { HttpRequest* request = UnwrapRequest(info.Holder()); const string& path = request->Host(); - return String::New(path.c_str(), static_cast(path.length())); + info.GetReturnValue().Set( + String::New(path.c_str(), static_cast(path.length()))); } -Handle JsHttpRequestProcessor::GetUserAgent(Local name, - const AccessorInfo& info) { +void JsHttpRequestProcessor::GetUserAgent( + Local name, + const PropertyCallbackInfo& info) { HttpRequest* request = UnwrapRequest(info.Holder()); const string& path = request->UserAgent(); - return String::New(path.c_str(), static_cast(path.length())); + info.GetReturnValue().Set( + String::New(path.c_str(), static_cast(path.length()))); } diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc index da18cc7..a0af931 100644 --- a/deps/v8/samples/shell.cc +++ b/deps/v8/samples/shell.cc @@ -25,11 +25,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// TODO(dcarney): remove this -#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR -#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT -#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW - #include #include #include @@ -58,11 +53,11 @@ bool ExecuteString(v8::Isolate* isolate, v8::Handle name, bool print_result, bool report_exceptions); -v8::Handle Print(const v8::Arguments& args); -v8::Handle Read(const v8::Arguments& args); -v8::Handle Load(const v8::Arguments& args); -v8::Handle Quit(const v8::Arguments& args); -v8::Handle Version(const v8::Arguments& args); +void Print(const v8::FunctionCallbackInfo& args); +void Read(const v8::FunctionCallbackInfo& args); +void Load(const v8::FunctionCallbackInfo& args); +void Quit(const v8::FunctionCallbackInfo& args); +void Version(const v8::FunctionCallbackInfo& args); v8::Handle ReadFile(const char* name); void ReportException(v8::Isolate* isolate, v8::TryCatch* handler); @@ -121,7 +116,7 @@ v8::Handle CreateShellContext(v8::Isolate* isolate) { // The callback that is invoked by v8 whenever the JavaScript 'print' // function is called. Prints its arguments on stdout separated by // spaces and ending with a newline. -v8::Handle Print(const v8::Arguments& args) { +void Print(const v8::FunctionCallbackInfo& args) { bool first = true; for (int i = 0; i < args.Length(); i++) { v8::HandleScope handle_scope(args.GetIsolate()); @@ -136,70 +131,73 @@ v8::Handle Print(const v8::Arguments& args) { } printf("\n"); fflush(stdout); - return v8::Undefined(); } // The callback that is invoked by v8 whenever the JavaScript 'read' // function is called. This function loads the content of the file named in // the argument into a JavaScript string. 
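The accessor conversions above keep the same registration call; only the callback shape changes, with results delivered through the info object. A sketch of wiring one up, assuming the surrounding process.cc template code (abbreviated):

    v8::Handle<v8::ObjectTemplate> result = v8::ObjectTemplate::New();
    result->SetInternalFieldCount(1);
    // New-style getters: void return, PropertyCallbackInfo<v8::Value>.
    result->SetAccessor(v8::String::NewSymbol("path"), GetPath);
    result->SetAccessor(v8::String::NewSymbol("host"), GetHost);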
-v8::Handle Read(const v8::Arguments& args) { +void Read(const v8::FunctionCallbackInfo& args) { if (args.Length() != 1) { - return v8::ThrowException(v8::String::New("Bad parameters")); + v8::ThrowException(v8::String::New("Bad parameters")); + return; } v8::String::Utf8Value file(args[0]); if (*file == NULL) { - return v8::ThrowException(v8::String::New("Error loading file")); + v8::ThrowException(v8::String::New("Error loading file")); + return; } v8::Handle source = ReadFile(*file); if (source.IsEmpty()) { - return v8::ThrowException(v8::String::New("Error loading file")); + v8::ThrowException(v8::String::New("Error loading file")); + return; } - return source; + args.GetReturnValue().Set(source); } // The callback that is invoked by v8 whenever the JavaScript 'load' // function is called. Loads, compiles and executes its argument // JavaScript file. -v8::Handle Load(const v8::Arguments& args) { +void Load(const v8::FunctionCallbackInfo& args) { for (int i = 0; i < args.Length(); i++) { v8::HandleScope handle_scope(args.GetIsolate()); v8::String::Utf8Value file(args[i]); if (*file == NULL) { - return v8::ThrowException(v8::String::New("Error loading file")); + v8::ThrowException(v8::String::New("Error loading file")); + return; } v8::Handle source = ReadFile(*file); if (source.IsEmpty()) { - return v8::ThrowException(v8::String::New("Error loading file")); + v8::ThrowException(v8::String::New("Error loading file")); + return; } if (!ExecuteString(args.GetIsolate(), source, v8::String::New(*file), false, false)) { - return v8::ThrowException(v8::String::New("Error executing file")); + v8::ThrowException(v8::String::New("Error executing file")); + return; } } - return v8::Undefined(); } // The callback that is invoked by v8 whenever the JavaScript 'quit' // function is called. Quits. -v8::Handle Quit(const v8::Arguments& args) { +void Quit(const v8::FunctionCallbackInfo& args) { // If not arguments are given args[0] will yield undefined which // converts to the integer value 0. int exit_code = args[0]->Int32Value(); fflush(stdout); fflush(stderr); exit(exit_code); - return v8::Undefined(); } -v8::Handle Version(const v8::Arguments& args) { - return v8::String::New(v8::V8::GetVersion()); +void Version(const v8::FunctionCallbackInfo& args) { + args.GetReturnValue().Set(v8::String::New(v8::V8::GetVersion())); } diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc index 64047a2..e441de4 100644 --- a/deps/v8/src/accessors.cc +++ b/deps/v8/src/accessors.cc @@ -687,7 +687,7 @@ const AccessorDescriptor Accessors::FunctionArguments = { class FrameFunctionIterator { public: - FrameFunctionIterator(Isolate* isolate, const AssertNoAllocation& promise) + FrameFunctionIterator(Isolate* isolate, const DisallowHeapAllocation& promise) : frame_iterator_(isolate), functions_(2), index_(0) { @@ -734,13 +734,13 @@ class FrameFunctionIterator { MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) { Isolate* isolate = Isolate::Current(); HandleScope scope(isolate); - AssertNoAllocation no_alloc; + DisallowHeapAllocation no_allocation; JSFunction* holder = FindInstanceOf(isolate, object); if (holder == NULL) return isolate->heap()->undefined_value(); if (holder->shared()->native()) return isolate->heap()->null_value(); Handle function(holder, isolate); - FrameFunctionIterator it(isolate, no_alloc); + FrameFunctionIterator it(isolate, no_allocation); // Find the function from the frames. 
if (!it.Find(*function)) { @@ -793,9 +793,9 @@ const AccessorDescriptor Accessors::FunctionCaller = { // Accessors::MakeModuleExport // -static v8::Handle ModuleGetExport( +static void ModuleGetExport( v8::Local property, - const v8::AccessorInfo& info) { + const v8::PropertyCallbackInfo& info) { JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder())); Context* context = Context::cast(instance->context()); ASSERT(context->IsModuleContext()); @@ -807,16 +807,16 @@ static v8::Handle ModuleGetExport( isolate->ScheduleThrow( *isolate->factory()->NewReferenceError("not_defined", HandleVector(&name, 1))); - return v8::Handle(); + return; } - return v8::Utils::ToLocal(Handle(value, isolate)); + info.GetReturnValue().Set(v8::Utils::ToLocal(Handle(value, isolate))); } static void ModuleSetExport( v8::Local property, v8::Local value, - const v8::AccessorInfo& info) { + const v8::PropertyCallbackInfo& info) { JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder())); Context* context = Context::cast(instance->context()); ASSERT(context->IsModuleContext()); diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 7099ca8..20496fe 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -25,9 +25,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// TODO(dcarney): remove -#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT - #include "api.h" #include // For memcpy, strlen. @@ -35,6 +32,7 @@ #include "../include/v8-debug.h" #include "../include/v8-profiler.h" #include "../include/v8-testing.h" +#include "assert-scope.h" #include "bootstrapper.h" #include "code-stubs.h" #include "compiler.h" @@ -625,31 +623,22 @@ i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) { } -void V8::MakeWeak(i::Isolate* isolate, - i::Object** object, +void V8::MakeWeak(i::Object** object, void* parameters, - RevivableCallback weak_reference_callback, - NearDeathCallback near_death_callback) { - ASSERT(isolate == i::Isolate::Current()); - LOG_API(isolate, "MakeWeak"); - isolate->global_handles()->MakeWeak(object, - parameters, - weak_reference_callback, - near_death_callback); + RevivableCallback weak_reference_callback) { + i::GlobalHandles::MakeWeak(object, + parameters, + weak_reference_callback); } -void V8::ClearWeak(i::Isolate* isolate, i::Object** obj) { - LOG_API(isolate, "ClearWeak"); - isolate->global_handles()->ClearWeakness(obj); +void V8::ClearWeak(i::Object** obj) { + i::GlobalHandles::ClearWeakness(obj); } -void V8::DisposeGlobal(i::Isolate* isolate, i::Object** obj) { - ASSERT(isolate == i::Isolate::Current()); - LOG_API(isolate, "DisposeGlobal"); - if (!isolate->IsInitialized()) return; - isolate->global_handles()->Destroy(obj); +void V8::DisposeGlobal(i::Object** obj) { + i::GlobalHandles::Destroy(obj); } // --- H a n d l e s --- @@ -686,19 +675,7 @@ HandleScope::~HandleScope() { void HandleScope::Leave() { - v8::ImplementationUtilities::HandleScopeData* current = - isolate_->handle_scope_data(); - current->level--; - ASSERT(current->level >= 0); - current->next = prev_next_; - if (current->limit != prev_limit_) { - current->limit = prev_limit_; - i::HandleScope::DeleteExtensions(isolate_); - } - -#ifdef ENABLE_EXTRA_CHECKS - i::HandleScope::ZapRange(prev_next_, prev_limit_); -#endif + return i::HandleScope::CloseScope(isolate_, prev_next_, prev_limit_); } @@ -909,7 +886,8 @@ void NeanderArray::add(i::Handle value) { int length = this->length(); int 
size = obj_.size(); if (length == size - 1) { - i::Handle new_elms = FACTORY->NewFixedArray(2 * size); + i::Factory* factory = i::Isolate::Current()->factory(); + i::Handle new_elms = factory->NewFixedArray(2 * size); for (int i = 0; i < length; i++) new_elms->set(i + 1, get(i)); obj_.value()->set_elements(*new_elms); @@ -985,7 +963,7 @@ void FunctionTemplate::Inherit(v8::Handle value) { template static Local FunctionTemplateNew( - Callback callback_in, + Callback callback, v8::Handle data, v8::Handle signature, int length) { @@ -1001,10 +979,8 @@ static Local FunctionTemplateNew( int next_serial_number = isolate->next_serial_number(); isolate->set_next_serial_number(next_serial_number + 1); obj->set_serial_number(i::Smi::FromInt(next_serial_number)); - if (callback_in != 0) { + if (callback != 0) { if (data.IsEmpty()) data = v8::Undefined(); - InvocationCallback callback = - i::CallbackTable::Register(isolate, callback_in); Utils::ToLocal(obj)->SetCallHandler(callback, data); } obj->set_length(length); @@ -1228,7 +1204,7 @@ int TypeSwitch::match(v8::Handle value) { template static void FunctionTemplateSetCallHandler(FunctionTemplate* function_template, - Callback callback, + Callback callback_in, v8::Handle data) { i::Isolate* isolate = Utils::OpenHandle(function_template)->GetIsolate(); if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetCallHandler()")) return; @@ -1238,6 +1214,8 @@ static void FunctionTemplateSetCallHandler(FunctionTemplate* function_template, isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE); i::Handle obj = i::Handle::cast(struct_obj); + FunctionCallback callback = + i::CallbackTable::Register(isolate, callback_in); SET_FIELD_WRAPPED(obj, set_callback, callback); if (data.IsEmpty()) data = v8::Undefined(); obj->set_data(*Utils::OpenHandle(*data)); @@ -1284,9 +1262,11 @@ static i::Handle MakeAccessorInfo( i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate(); i::Handle obj = isolate->factory()->NewExecutableAccessorInfo(); - AccessorGetter getter = i::CallbackTable::Register(isolate, getter_in); + AccessorGetterCallback getter = + i::CallbackTable::Register(isolate, getter_in); SET_FIELD_WRAPPED(obj, set_getter, getter); - AccessorSetter setter = i::CallbackTable::Register(isolate, setter_in); + AccessorSetterCallback setter = + i::CallbackTable::Register(isolate, setter_in); SET_FIELD_WRAPPED(obj, set_setter, setter); if (data.IsEmpty()) data = v8::Undefined(); obj->set_data(*Utils::OpenHandle(*data)); @@ -1389,16 +1369,19 @@ static void SetNamedInstancePropertyHandler( i::Handle obj = i::Handle::cast(struct_obj); - NamedPropertyGetter getter = i::CallbackTable::Register(isolate, getter_in); + NamedPropertyGetterCallback getter = + i::CallbackTable::Register(isolate, getter_in); if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter); - NamedPropertySetter setter = i::CallbackTable::Register(isolate, setter_in); + NamedPropertySetterCallback setter = + i::CallbackTable::Register(isolate, setter_in); if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter); - NamedPropertyQuery query = i::CallbackTable::Register(isolate, query_in); + NamedPropertyQueryCallback query = + i::CallbackTable::Register(isolate, query_in); if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query); - NamedPropertyDeleter remover = + NamedPropertyDeleterCallback remover = i::CallbackTable::Register(isolate, remover_in); if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover); - NamedPropertyEnumerator enumerator = + NamedPropertyEnumeratorCallback enumerator = 
i::CallbackTable::Register(isolate, enumerator_in); if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator); @@ -1434,18 +1417,19 @@ static void SetIndexedInstancePropertyHandler( i::Handle obj = i::Handle::cast(struct_obj); - IndexedPropertyGetter getter = + IndexedPropertyGetterCallback getter = i::CallbackTable::Register(isolate, getter_in); if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter); - IndexedPropertySetter setter = + IndexedPropertySetterCallback setter = i::CallbackTable::Register(isolate, setter_in); if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter); - IndexedPropertyQuery query = i::CallbackTable::Register(isolate, query_in); + IndexedPropertyQueryCallback query = + i::CallbackTable::Register(isolate, query_in); if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query); - IndexedPropertyDeleter remover = + IndexedPropertyDeleterCallback remover = i::CallbackTable::Register(isolate, remover_in); if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover); - IndexedPropertyEnumerator enumerator = + IndexedPropertyEnumeratorCallback enumerator = i::CallbackTable::Register(isolate, enumerator_in); if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator); @@ -1471,7 +1455,7 @@ static void SetInstanceCallAsFunctionHandler( isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE); i::Handle obj = i::Handle::cast(struct_obj); - InvocationCallback callback = + FunctionCallback callback = i::CallbackTable::Register(isolate, callback_in); SET_FIELD_WRAPPED(obj, set_callback, callback); if (data.IsEmpty()) data = v8::Undefined(); @@ -3879,7 +3863,8 @@ v8::Local v8::Object::GetHiddenValue(v8::Handle key) { ENTER_V8(isolate); i::Handle self = Utils::OpenHandle(this); i::Handle key_obj = Utils::OpenHandle(*key); - i::Handle key_string = FACTORY->InternalizeString(key_obj); + i::Handle key_string = + isolate->factory()->InternalizeString(key_obj); i::Handle result(self->GetHiddenProperty(*key_string), isolate); if (result->IsUndefined()) return v8::Local(); return Utils::ToLocal(result); @@ -3893,7 +3878,8 @@ bool v8::Object::DeleteHiddenValue(v8::Handle key) { i::HandleScope scope(isolate); i::Handle self = Utils::OpenHandle(this); i::Handle key_obj = Utils::OpenHandle(*key); - i::Handle key_string = FACTORY->InternalizeString(key_obj); + i::Handle key_string = + isolate->factory()->InternalizeString(key_obj); self->DeleteHiddenProperty(*key_string); return true; } @@ -4317,6 +4303,124 @@ bool String::IsOneByte() const { return str->HasOnlyOneByteChars(); } +// Helpers for ContainsOnlyOneByteHelper +template struct OneByteMask; +template<> struct OneByteMask<4> { + static const uint32_t value = 0xFF00FF00; +}; +template<> struct OneByteMask<8> { + static const uint64_t value = V8_2PART_UINT64_C(0xFF00FF00, FF00FF00); +}; +static const uintptr_t kOneByteMask = OneByteMask::value; +static const uintptr_t kAlignmentMask = sizeof(uintptr_t) - 1; +static inline bool Unaligned(const uint16_t* chars) { + return reinterpret_cast(chars) & kAlignmentMask; +} +static inline const uint16_t* Align(const uint16_t* chars) { + return reinterpret_cast( + reinterpret_cast(chars) & ~kAlignmentMask); +} + +class ContainsOnlyOneByteHelper { + public: + ContainsOnlyOneByteHelper() : is_one_byte_(true) {} + bool Check(i::String* string) { + i::ConsString* cons_string = i::String::VisitFlat(this, string, 0); + if (cons_string == NULL) return is_one_byte_; + return CheckCons(cons_string); + } + void VisitOneByteString(const uint8_t* chars, int length) { + // 
Nothing to do. + } + void VisitTwoByteString(const uint16_t* chars, int length) { + // Accumulated bits. + uintptr_t acc = 0; + // Align to uintptr_t. + const uint16_t* end = chars + length; + while (Unaligned(chars) && chars != end) { + acc |= *chars++; + } + // Read word aligned in blocks, + // checking the return value at the end of each block. + const uint16_t* aligned_end = Align(end); + const int increment = sizeof(uintptr_t)/sizeof(uint16_t); + const int inner_loops = 16; + while (chars + inner_loops*increment < aligned_end) { + for (int i = 0; i < inner_loops; i++) { + acc |= *reinterpret_cast(chars); + chars += increment; + } + // Check for early return. + if ((acc & kOneByteMask) != 0) { + is_one_byte_ = false; + return; + } + } + // Read the rest. + while (chars != end) { + acc |= *chars++; + } + // Check result. + if ((acc & kOneByteMask) != 0) is_one_byte_ = false; + } + + private: + bool CheckCons(i::ConsString* cons_string) { + while (true) { + // Check left side if flat. + i::String* left = cons_string->first(); + i::ConsString* left_as_cons = + i::String::VisitFlat(this, left, 0); + if (!is_one_byte_) return false; + // Check right side if flat. + i::String* right = cons_string->second(); + i::ConsString* right_as_cons = + i::String::VisitFlat(this, right, 0); + if (!is_one_byte_) return false; + // Standard recurse/iterate trick. + if (left_as_cons != NULL && right_as_cons != NULL) { + if (left->length() < right->length()) { + CheckCons(left_as_cons); + cons_string = right_as_cons; + } else { + CheckCons(right_as_cons); + cons_string = left_as_cons; + } + // Check fast return. + if (!is_one_byte_) return false; + continue; + } + // Descend left in place. + if (left_as_cons != NULL) { + cons_string = left_as_cons; + continue; + } + // Descend right in place. + if (right_as_cons != NULL) { + cons_string = right_as_cons; + continue; + } + // Terminate. 
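The masking in VisitTwoByteString above is dense; a worked instance, assuming a 32-bit build where kOneByteMask is 0xFF00FF00, i.e. the high byte of each packed uint16_t character:

    // acc |= 'A' (0x0041); acc |= 'a' (0x0061)  =>  acc == 0x0061
    //   acc & 0xFF00FF00 == 0          -> everything fits in one byte so far
    // acc |= 0x4E2D (a CJK character)  =>  acc == 0x4E6D
    //   acc & 0xFF00FF00 == 0x4E00     -> some character needs two bytes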
+ break; + } + return is_one_byte_; + } + bool is_one_byte_; + DISALLOW_COPY_AND_ASSIGN(ContainsOnlyOneByteHelper); +}; + + +bool String::ContainsOnlyOneByte() const { + i::Handle str = Utils::OpenHandle(this); + if (IsDeadCheck(str->GetIsolate(), + "v8::String::ContainsOnlyOneByte()")) { + return false; + } + if (str->HasOnlyOneByteChars()) return true; + ContainsOnlyOneByteHelper helper; + return helper.Check(*str); +} + class Utf8LengthHelper : public i::AllStatic { public: @@ -5056,6 +5160,15 @@ void v8::V8::SetJitCodeEventHandler( isolate->logger()->SetCodeEventHandler(options, event_handler); } +void v8::V8::SetArrayBufferAllocator( + ArrayBuffer::Allocator* allocator) { + if (!ApiCheck(i::V8::ArrayBufferAllocator() == NULL, + "v8::V8::SetArrayBufferAllocator", + "ArrayBufferAllocator might only be set once")) + return; + i::V8::SetArrayBufferAllocator(allocator); +} + bool v8::V8::Dispose() { i::Isolate* isolate = i::Isolate::Current(); @@ -5107,8 +5220,9 @@ class VisitorAdapter : public i::ObjectVisitor { UNREACHABLE(); } virtual void VisitEmbedderReference(i::Object** p, uint16_t class_id) { - visitor_->VisitPersistentHandle(ToApi(i::Handle(p)), - class_id); + Value* value = ToApi(i::Handle(p)); + visitor_->VisitPersistentHandle( + reinterpret_cast*>(&value), class_id); } private: PersistentHandleVisitor* visitor_; @@ -5119,7 +5233,7 @@ void v8::V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::V8::VisitHandlesWithClassId"); - i::AssertNoAllocation no_allocation; + i::DisallowHeapAllocation no_allocation; VisitorAdapter visitor_adapter(visitor); isolate->global_handles()->IterateAllRootsWithClassIds(&visitor_adapter); @@ -5132,7 +5246,7 @@ void v8::V8::VisitHandlesForPartialDependence( ASSERT(isolate == i::Isolate::Current()); IsDeadCheck(isolate, "v8::V8::VisitHandlesForPartialDependence"); - i::AssertNoAllocation no_allocation; + i::DisallowHeapAllocation no_allocation; VisitorAdapter visitor_adapter(visitor); isolate->global_handles()->IterateAllRootsInNewSpaceWithClassIds( @@ -5914,13 +6028,14 @@ void v8::Date::DateTimeConfigurationChangeNotification() { static i::Handle RegExpFlagsToString(RegExp::Flags flags) { + i::Isolate* isolate = i::Isolate::Current(); uint8_t flags_buf[3]; int num_flags = 0; if ((flags & RegExp::kGlobal) != 0) flags_buf[num_flags++] = 'g'; if ((flags & RegExp::kMultiline) != 0) flags_buf[num_flags++] = 'm'; if ((flags & RegExp::kIgnoreCase) != 0) flags_buf[num_flags++] = 'i'; ASSERT(num_flags <= static_cast(ARRAY_SIZE(flags_buf))); - return FACTORY->InternalizeOneByteString( + return isolate->factory()->InternalizeOneByteString( i::Vector(flags_buf, num_flags)); } @@ -6019,19 +6134,48 @@ Local Array::CloneElementAt(uint32_t index) { } -size_t v8::ArrayBuffer::ByteLength() const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::ArrayBuffer::ByteLength()")) return 0; +bool v8::ArrayBuffer::IsExternal() const { + return Utils::OpenHandle(this)->is_external(); +} + +v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() { i::Handle obj = Utils::OpenHandle(this); - return static_cast(obj->byte_length()->Number()); + ApiCheck(!obj->is_external(), + "v8::ArrayBuffer::Externalize", + "ArrayBuffer already externalized"); + obj->set_is_external(true); + size_t byte_length = static_cast(obj->byte_length()->Number()); + Contents contents; + contents.data_ = obj->backing_store(); + contents.byte_length_ = byte_length; + return 
contents; } -void* v8::ArrayBuffer::Data() const { +void v8::ArrayBuffer::Neuter() { + i::Handle obj = Utils::OpenHandle(this); + i::Isolate* isolate = obj->GetIsolate(); + ApiCheck(obj->is_external(), + "v8::ArrayBuffer::Neuter", + "Only externalized ArrayBuffers can be neutered"); + LOG_API(obj->GetIsolate(), "v8::ArrayBuffer::Neuter()"); + ENTER_V8(isolate); + + for (i::Handle array_obj(obj->weak_first_array(), isolate); + *array_obj != i::Smi::FromInt(0);) { + i::Handle typed_array(i::JSTypedArray::cast(*array_obj)); + typed_array->Neuter(); + array_obj = i::handle(typed_array->weak_next(), isolate); + } + obj->Neuter(); +} + + +size_t v8::ArrayBuffer::ByteLength() const { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::ArrayBuffer::Data()")) return 0; + if (IsDeadCheck(isolate, "v8::ArrayBuffer::ByteLength()")) return 0; i::Handle obj = Utils::OpenHandle(this); - return obj->backing_store(); + return static_cast(obj->byte_length()->Number()); } @@ -6054,7 +6198,7 @@ Local v8::ArrayBuffer::New(void* data, size_t byte_length) { ENTER_V8(isolate); i::Handle obj = isolate->factory()->NewJSArrayBuffer(); - i::Runtime::SetupArrayBuffer(isolate, obj, data, byte_length); + i::Runtime::SetupArrayBuffer(isolate, obj, true, data, byte_length); return Utils::ToLocal(obj); } @@ -6121,6 +6265,9 @@ i::Handle NewTypedArray( obj->set_buffer(*buffer); + obj->set_weak_next(buffer->weak_first_array()); + buffer->set_weak_first_array(*obj); + i::Handle byte_offset_object = isolate->factory()->NewNumber( static_cast(byte_offset)); obj->set_byte_offset(*byte_offset_object); @@ -6265,14 +6412,12 @@ Local v8::Integer::NewFromUnsigned(uint32_t value, Isolate* isolate) { #ifdef DEBUG -v8::AssertNoGCScope::AssertNoGCScope(v8::Isolate* isolate) - : isolate_(isolate), - last_state_(i::EnterAllocationScope( - reinterpret_cast(isolate), false)) { +v8::AssertNoGCScope::AssertNoGCScope(v8::Isolate* isolate) { + disallow_heap_allocation_ = new i::DisallowHeapAllocation(); } v8::AssertNoGCScope::~AssertNoGCScope() { - i::ExitAllocationScope(reinterpret_cast(isolate_), last_state_); + delete static_cast(disallow_heap_allocation_); } #endif @@ -6359,42 +6504,6 @@ void V8::SetFailedAccessCheckCallbackFunction( } -void V8::AddObjectGroup(Persistent* objects, - size_t length, - RetainedObjectInfo* info) { - i::Isolate* isolate = i::Isolate::Current(); - if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return; - STATIC_ASSERT(sizeof(Persistent) == sizeof(i::Object**)); - isolate->global_handles()->AddObjectGroup( - reinterpret_cast(objects), length, info); -} - - -void V8::AddObjectGroup(Isolate* exported_isolate, - Persistent* objects, - size_t length, - RetainedObjectInfo* info) { - i::Isolate* isolate = reinterpret_cast(exported_isolate); - ASSERT(isolate == i::Isolate::Current()); - if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return; - STATIC_ASSERT(sizeof(Persistent) == sizeof(i::Object**)); - isolate->global_handles()->AddObjectGroup( - reinterpret_cast(objects), length, info); -} - - -void V8::AddImplicitReferences(Persistent parent, - Persistent* children, - size_t length) { - i::Isolate* isolate = i::Isolate::Current(); - if (IsDeadCheck(isolate, "v8::V8::AddImplicitReferences()")) return; - STATIC_ASSERT(sizeof(Persistent) == sizeof(i::Object**)); - isolate->global_handles()->AddImplicitReferences( - i::Handle::cast(Utils::OpenHandle(*parent)).location(), - reinterpret_cast(children), length); -} - - intptr_t Isolate::AdjustAmountOfExternalAllocatedMemory( 
intptr_t change_in_bytes) { i::Heap* heap = reinterpret_cast(this)->heap(); @@ -7228,6 +7337,12 @@ const CpuProfile* CpuProfiler::GetCpuProfile(int index, } +const CpuProfile* CpuProfiler::GetCpuProfile(int index) { + return reinterpret_cast( + reinterpret_cast(this)->GetProfile(NULL, index)); +} + + const CpuProfile* CpuProfiler::FindProfile(unsigned uid, Handle security_token) { i::Isolate* isolate = i::Isolate::Current(); @@ -7287,6 +7402,14 @@ const CpuProfile* CpuProfiler::StopCpuProfiling(Handle title, } +const CpuProfile* CpuProfiler::StopCpuProfiling(Handle title) { + return reinterpret_cast( + reinterpret_cast(this)->StopProfiling( + NULL, + *Utils::OpenHandle(*title))); +} + + void CpuProfiler::DeleteAllProfiles() { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfiler::DeleteAllProfiles"); @@ -7826,8 +7949,7 @@ DeferredHandles* HandleScopeImplementer::Detach(Object** prev_limit) { while (!blocks_.is_empty()) { Object** block_start = blocks_.last(); Object** block_limit = &block_start[kHandleBlockSize]; - // We should not need to check for NoHandleAllocation here. Assert - // this. + // We should not need to check for SealHandleScope here. Assert this. ASSERT(prev_limit == block_limit || !(block_start <= prev_limit && prev_limit <= block_limit)); if (prev_limit == block_limit) break; diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h index 12d6e3d..3c141f7 100644 --- a/deps/v8/src/api.h +++ b/deps/v8/src/api.h @@ -126,8 +126,9 @@ template inline T ToCData(v8::internal::Object* obj) { template inline v8::internal::Handle FromCData(T obj) { + v8::internal::Isolate* isolate = v8::internal::Isolate::Current(); STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address)); - return FACTORY->NewForeign( + return isolate->factory()->NewForeign( reinterpret_cast(reinterpret_cast(obj))); } @@ -636,8 +637,13 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) { internal::Object** block_start = blocks_.last(); internal::Object** block_limit = block_start + kHandleBlockSize; #ifdef DEBUG - // NoHandleAllocation may make the prev_limit to point inside the block. - if (block_start <= prev_limit && prev_limit <= block_limit) break; + // SealHandleScope may make the prev_limit to point inside the block. + if (block_start <= prev_limit && prev_limit <= block_limit) { +#ifdef ENABLE_EXTRA_CHECKS + internal::HandleScope::ZapRange(prev_limit, block_limit); +#endif + break; + } #else if (prev_limit == block_limit) break; #endif diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h index a80b613..f9dca11 100644 --- a/deps/v8/src/arguments.h +++ b/deps/v8/src/arguments.h @@ -52,7 +52,8 @@ class Arguments BASE_EMBEDDED { Object*& operator[] (int index) { ASSERT(0 <= index && index < length_); - return arguments_[-index]; + return *(reinterpret_cast(reinterpret_cast(arguments_) - + index * kPointerSize)); } template Handle at(int index) { @@ -152,8 +153,7 @@ class Arguments BASE_EMBEDDED { // TODO(dcarney): Remove this class when old callbacks are gone. class CallbackTable { public: - // TODO(dcarney): Flip this when it makes sense for performance. 
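The Arguments::operator[] rewrite above computes the same element address as the old arguments_[-index], only spelled out in byte arithmetic. A worked instance, assuming 4-byte pointers (the addresses are illustrative):

    // index == 2, arguments_ == 0x1008:
    //   old: &arguments_[-2]                                    == 0x1000
    //   new: (Object**)((Address)arguments_ - 2 * kPointerSize) == 0x1000
    // Arguments are laid out downwards from arguments_, so both forms step
    // back towards lower addresses.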
- static const bool kStoreVoidFunctions = true; + static const bool kStoreVoidFunctions = false; static inline bool ReturnsVoid(Isolate* isolate, void* function) { CallbackTable* table = isolate->callback_table(); bool contains = @@ -171,13 +171,13 @@ } #define WRITE_REGISTER(OldFunction, NewFunction) \ - static OldFunction Register(Isolate* isolate, NewFunction f) { \ - InsertCallback(isolate, FunctionToVoidPtr(f), true); \ - return reinterpret_cast<OldFunction>(f); \ + static NewFunction Register(Isolate* isolate, OldFunction f) { \ + InsertCallback(isolate, FunctionToVoidPtr(f), false); \ + return reinterpret_cast<NewFunction>(f); \ } \ \ - static OldFunction Register(Isolate* isolate, OldFunction f) { \ - InsertCallback(isolate, FunctionToVoidPtr(f), false); \ + static NewFunction Register(Isolate* isolate, NewFunction f) { \ + InsertCallback(isolate, FunctionToVoidPtr(f), true); \ return f; \ } FOR_EACH_CALLBACK_TABLE_MAPPING(WRITE_REGISTER) @@ -254,6 +254,10 @@ class PropertyCallbackArguments values[T::kHolderIndex] = holder; values[T::kDataIndex] = data; values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate); + // Here the hole is set as the default value. + // It cannot escape into JS as it's removed in Call below. + values[T::kReturnValueDefaultValueIndex] = + isolate->heap()->the_hole_value(); values[T::kReturnValueIndex] = isolate->heap()->the_hole_value(); ASSERT(values[T::kHolderIndex]->IsHeapObject()); ASSERT(values[T::kIsolateIndex]->IsSmi()); @@ -314,6 +318,10 @@ class FunctionCallbackArguments values[T::kCalleeIndex] = callee; values[T::kHolderIndex] = holder; values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate); + // Here the hole is set as the default value. + // It cannot escape into JS as it's removed in Call below. + values[T::kReturnValueDefaultValueIndex] = + isolate->heap()->the_hole_value(); values[T::kReturnValueIndex] = isolate->heap()->the_hole_value(); ASSERT(values[T::kCalleeIndex]->IsJSFunction()); ASSERT(values[T::kHolderIndex]->IsHeapObject()); diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index 0102f33..c6ea600 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -308,7 +308,7 @@ Operand::Operand(Handle<Object> handle) { #ifdef DEBUG Isolate* isolate = Isolate::Current(); #endif - ALLOW_HANDLE_DEREF(isolate, "using and embedding raw address"); + AllowDeferredHandleDereference using_raw_address; rm_ = no_reg; // Verify all Objects referred by code are NOT in new space. Object* obj = *handle; @@ -1368,6 +1368,7 @@ void Assembler::mls(Register dst, Register src1, Register src2, Register srcA, void Assembler::sdiv(Register dst, Register src1, Register src2, Condition cond) { ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc)); + ASSERT(IsEnabled(SUDIV)); emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 | src2.code()*B8 | B4 | src1.code()); } diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index 6333924..4d7bc8e 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -480,15 +480,20 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) { // Run the native code for the InternalArray function called as a normal // function. - ArrayNativeCode(masm, &generic_array_code); - - // Jump to the generic array code if the specialized code cannot handle the - // construction.
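The kReturnValueDefaultValueIndex slot introduced in the arguments.h hunks above gives each invocation a "nothing was set" marker. A hedged reading of the mechanism, inferred from the comments in those hunks rather than from the Call implementation itself:

    // Both slots start as the hole. A callback that never calls
    // GetReturnValue().Set() leaves the hole in kReturnValueIndex, and
    // Call() substitutes the default value before returning, so the hole
    // can never leak into JS.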
- __ bind(&generic_array_code); - - Handle array_code = - masm->isolate()->builtins()->InternalArrayCodeGeneric(); - __ Jump(array_code, RelocInfo::CODE_TARGET); + if (FLAG_optimize_constructed_arrays) { + // tail call a stub + InternalArrayConstructorStub stub(masm->isolate()); + __ TailCallStub(&stub); + } else { + ArrayNativeCode(masm, &generic_array_code); + + // Jump to the generic array code if the specialized code cannot handle the + // construction. + __ bind(&generic_array_code); + Handle array_code = + masm->isolate()->builtins()->InternalArrayCodeGeneric(); + __ Jump(array_code, RelocInfo::CODE_TARGET); + } } @@ -513,15 +518,24 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) { } // Run the native code for the Array function called as a normal function. - ArrayNativeCode(masm, &generic_array_code); - - // Jump to the generic array code if the specialized code cannot handle - // the construction. - __ bind(&generic_array_code); - - Handle array_code = - masm->isolate()->builtins()->ArrayCodeGeneric(); - __ Jump(array_code, RelocInfo::CODE_TARGET); + if (FLAG_optimize_constructed_arrays) { + // tail call a stub + Handle undefined_sentinel( + masm->isolate()->heap()->undefined_value(), + masm->isolate()); + __ mov(r2, Operand(undefined_sentinel)); + ArrayConstructorStub stub(masm->isolate()); + __ TailCallStub(&stub); + } else { + ArrayNativeCode(masm, &generic_array_code); + + // Jump to the generic array code if the specialized code cannot handle + // the construction. + __ bind(&generic_array_code); + Handle array_code = + masm->isolate()->builtins()->ArrayCodeGeneric(); + __ Jump(array_code, RelocInfo::CODE_TARGET); + } } diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index c667c90..b26bf7e 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -30,7 +30,6 @@ #if defined(V8_TARGET_ARCH_ARM) #include "bootstrapper.h" -#include "builtins-decls.h" #include "code-stubs.h" #include "regexp-macro-assembler.h" #include "stub-cache.h" @@ -45,7 +44,6 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor( static Register registers[] = { r3, r2, r1 }; descriptor->register_param_count_ = 3; descriptor->register_params_ = registers; - descriptor->stack_parameter_count_ = NULL; descriptor->deoptimization_handler_ = Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry; } @@ -57,7 +55,6 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( static Register registers[] = { r3, r2, r1, r0 }; descriptor->register_param_count_ = 4; descriptor->register_params_ = registers; - descriptor->stack_parameter_count_ = NULL; descriptor->deoptimization_handler_ = Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry; } @@ -80,7 +77,6 @@ void LoadFieldStub::InitializeInterfaceDescriptor( static Register registers[] = { r0 }; descriptor->register_param_count_ = 1; descriptor->register_params_ = registers; - descriptor->stack_parameter_count_ = NULL; descriptor->deoptimization_handler_ = NULL; } @@ -91,7 +87,6 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor( static Register registers[] = { r1 }; descriptor->register_param_count_ = 1; descriptor->register_params_ = registers; - descriptor->stack_parameter_count_ = NULL; descriptor->deoptimization_handler_ = NULL; } @@ -127,8 +122,8 @@ void CompareNilICStub::InitializeInterfaceDescriptor( descriptor->register_params_ = registers; descriptor->deoptimization_handler_ = FUNCTION_ADDR(CompareNilIC_Miss); - 
descriptor->miss_handler_ = - ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate); + descriptor->SetMissHandler( + ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate)); } @@ -150,7 +145,29 @@ static void InitializeArrayConstructorDescriptor( descriptor->register_params_ = registers; descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; descriptor->deoptimization_handler_ = - FUNCTION_ADDR(ArrayConstructor_StubFailure); + Runtime::FunctionForId(Runtime::kArrayConstructor)->entry; +} + + +static void InitializeInternalArrayConstructorDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor, + int constant_stack_parameter_count) { + // register state + // r0 -- number of arguments + // r1 -- constructor function + static Register registers[] = { r1 }; + descriptor->register_param_count_ = 1; + + if (constant_stack_parameter_count != 0) { + // stack param count needs (constructor pointer, and single argument) + descriptor->stack_parameter_count_ = &r0; + } + descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; + descriptor->register_params_ = registers; + descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; + descriptor->deoptimization_handler_ = + Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry; } @@ -175,6 +192,40 @@ void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( } +void ToBooleanStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { r0 }; + descriptor->register_param_count_ = 1; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = + FUNCTION_ADDR(ToBooleanIC_Miss); + descriptor->SetMissHandler( + ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate)); +} + + +void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0); +} + + +void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1); +} + + +void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1); +} + + #define __ ACCESS_MASM(masm) static void EmitIdenticalObjectComparison(MacroAssembler* masm, @@ -218,7 +269,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { for (int i = 0; i < param_count; ++i) { __ push(descriptor->register_params_[i]); } - ExternalReference miss = descriptor->miss_handler_; + ExternalReference miss = descriptor->miss_handler(); __ CallExternalReference(miss, descriptor->register_param_count_); } @@ -649,7 +700,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, __ cmp(r0, r1); __ b(ne, ¬_identical); - // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(), + // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), // so we do the second best thing - test it ourselves. // They are both equal and they are not both Smis so both of them are not // Smis. If it's not a heap number, then return equal. 
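The descriptor hunks above replace direct writes to miss_handler_ with SetMissHandler(). Condensed, the initialization shape they now share looks like this (MyStub and kMyIC_Miss are hypothetical names; the register choice follows the ARM hunks above):

    void MyStub::InitializeInterfaceDescriptor(
        Isolate* isolate,
        CodeStubInterfaceDescriptor* descriptor) {
      static Register registers[] = { r0 };
      descriptor->register_param_count_ = 1;
      descriptor->register_params_ = registers;
      descriptor->deoptimization_handler_ = FUNCTION_ADDR(MyIC_Miss);
      descriptor->SetMissHandler(
          ExternalReference(IC_Utility(IC::kMyIC_Miss), isolate));
    }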
@@ -1207,116 +1258,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { } -// The stub expects its argument in the tos_ register and returns its result in -// it, too: zero for false, and a non-zero value for true. -void ToBooleanStub::Generate(MacroAssembler* masm) { - // This stub overrides SometimesSetsUpAFrame() to return false. That means - // we cannot call anything that could cause a GC from this stub. - Label patch; - const Register map = r9.is(tos_) ? r7 : r9; - - // undefined -> false. - CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false); - - // Boolean -> its value. - CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false); - CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true); - - // 'null' -> false. - CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false); - - if (types_.Contains(SMI)) { - // Smis: 0 -> false, all other -> true - __ SmiTst(tos_); - // tos_ contains the correct return value already - __ Ret(eq); - } else if (types_.NeedsMap()) { - // If we need a map later and have a Smi -> patch. - __ JumpIfSmi(tos_, &patch); - } - - if (types_.NeedsMap()) { - __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset)); - - if (types_.CanBeUndetectable()) { - __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); - __ tst(ip, Operand(1 << Map::kIsUndetectable)); - // Undetectable -> false. - __ mov(tos_, Operand::Zero(), LeaveCC, ne); - __ Ret(ne); - } - } - - if (types_.Contains(SPEC_OBJECT)) { - // Spec object -> true. - __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE); - // tos_ contains the correct non-zero return value already. - __ Ret(ge); - } - - if (types_.Contains(STRING)) { - // String value -> false iff empty. - __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); - __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt); - __ Ret(lt); // the string length is OK as the return value - } - - if (types_.Contains(HEAP_NUMBER)) { - // Heap number -> false iff +0, -0, or NaN. - Label not_heap_number; - __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); - __ b(ne, ¬_heap_number); - - __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset)); - __ VFPCompareAndSetFlags(d1, 0.0); - // "tos_" is a register, and contains a non zero value by default. - // Hence we only need to overwrite "tos_" with zero to return false for - // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. - __ mov(tos_, Operand::Zero(), LeaveCC, eq); // for FP_ZERO - __ mov(tos_, Operand::Zero(), LeaveCC, vs); // for FP_NAN - __ Ret(); - __ bind(¬_heap_number); - } - - __ bind(&patch); - GenerateTypeTransition(masm); -} - - -void ToBooleanStub::CheckOddball(MacroAssembler* masm, - Type type, - Heap::RootListIndex value, - bool result) { - if (types_.Contains(type)) { - // If we see an expected oddball, return its ToBoolean value tos_. - __ LoadRoot(ip, value); - __ cmp(tos_, ip); - // The value of a root is never NULL, so we can avoid loading a non-null - // value into tos_ when we want to return 'true'. - if (!result) { - __ mov(tos_, Operand::Zero(), LeaveCC, eq); - } - __ Ret(eq); - } -} - - -void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) { - if (!tos_.is(r3)) { - __ mov(r3, Operand(tos_)); - } - __ mov(r2, Operand(Smi::FromInt(tos_.code()))); - __ mov(r1, Operand(Smi::FromInt(types_.ToByte()))); - __ Push(r3, r2, r1); - // Patch the caller to an appropriate specialized stub and return the - // operation result to the caller of the stub. 
- __ TailCallExternalReference( - ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()), - 3, - 1); -} - - void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { // We don't allow a GC during a store buffer overflow so there is no need to // store the registers in any particular way, but we do have to store and @@ -1766,6 +1707,7 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, __ Ret(); if (CpuFeatures::IsSupported(SUDIV)) { + CpuFeatureScope scope(masm, SUDIV); Label result_not_zero; __ bind(&div_with_sdiv); @@ -1822,6 +1764,7 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, __ Ret(); if (CpuFeatures::IsSupported(SUDIV)) { + CpuFeatureScope scope(masm, SUDIV); __ bind(&modulo_with_sdiv); __ mov(scratch2, right); // Perform modulus with sdiv and mls. @@ -2130,7 +2073,14 @@ void BinaryOpStub_GenerateSmiCode( void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { - Label not_smis, call_runtime; + Label right_arg_changed, call_runtime; + + if (op_ == Token::MOD && has_fixed_right_arg_) { + // It is guaranteed that the value will fit into a Smi, because if it + // didn't, we wouldn't be here, see BinaryOp_Patch. + __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value()))); + __ b(ne, &right_arg_changed); + } if (result_type_ == BinaryOpIC::UNINITIALIZED || result_type_ == BinaryOpIC::SMI) { @@ -2147,6 +2097,7 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { // Code falls through if the result is not returned as either a smi or heap // number. + __ bind(&right_arg_changed); GenerateTypeTransition(masm); __ bind(&call_runtime); @@ -2259,42 +2210,25 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { UNREACHABLE(); } - if (op_ != Token::DIV) { - // These operations produce an integer result. - // Try to return a smi if we can. - // Otherwise return a heap number if allowed, or jump to type - // transition. - - if (result_type_ <= BinaryOpIC::INT32) { - __ TryDoubleToInt32Exact(scratch1, d5, d8); - // If the ne condition is set, result does - // not fit in a 32-bit integer. - __ b(ne, &transition); - } else { - __ vcvt_s32_f64(s8, d5); - __ vmov(scratch1, s8); - } - - // Check if the result fits in a smi. - __ add(scratch2, scratch1, Operand(0x40000000), SetCC); - // If not try to return a heap number. - __ b(mi, &return_heap_number); - // Check for minus zero. Return heap number for minus zero if - // double results are allowed; otherwise transition. + if (result_type_ <= BinaryOpIC::INT32) { + __ TryDoubleToInt32Exact(scratch1, d5, d8); + // If the ne condition is set, result does + // not fit in a 32-bit integer. + __ b(ne, &transition); + // Try to tag the result as a Smi, return heap number on overflow. + __ SmiTag(scratch1, SetCC); + __ b(vs, &return_heap_number); + // Check for minus zero, transition in that case (because we need + // to return a heap number). Label not_zero; - __ cmp(scratch1, Operand::Zero()); + ASSERT(kSmiTag == 0); __ b(ne, ¬_zero); __ vmov(scratch2, d5.high()); __ tst(scratch2, Operand(HeapNumber::kSignMask)); - __ b(ne, result_type_ <= BinaryOpIC::INT32 ? &transition - : &return_heap_number); + __ b(ne, &transition); __ bind(¬_zero); - - // Tag the result and return. - __ SmiTag(r0, scratch1); + __ mov(r0, scratch1); __ Ret(); - } else { - // DIV just falls through to allocating a heap number. } __ bind(&return_heap_number); @@ -2318,6 +2252,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { // to type transition. 
} else { + if (has_fixed_right_arg_) { + __ Vmov(d8, fixed_right_arg_value(), scratch1); + __ VFPCompareAndSetFlags(d1, d8); + __ b(ne, &transition); + } + // We preserved r0 and r1 to be able to call runtime. // Save the left value on the stack. __ Push(r5, r4); @@ -4689,7 +4629,6 @@ static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) { // megamorphic. // r1 : the function to call // r2 : cache cell for call target - ASSERT(!FLAG_optimize_constructed_arrays); Label done; ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), @@ -7336,6 +7275,10 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) { ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); T stub(kind); stub.GetCode(isolate)->set_is_pregenerated(true); + if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) { + T stub1(kind, true); + stub1.GetCode(isolate)->set_is_pregenerated(true); + } } } @@ -7350,6 +7293,21 @@ void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) { } +void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime( + Isolate* isolate) { + ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS }; + for (int i = 0; i < 2; i++) { + // For internal arrays we only need a few things + InternalArrayNoArgumentConstructorStub stubh1(kinds[i]); + stubh1.GetCode(isolate)->set_is_pregenerated(true); + InternalArraySingleArgumentConstructorStub stubh2(kinds[i]); + stubh2.GetCode(isolate)->set_is_pregenerated(true); + InternalArrayNArgumentsConstructorStub stubh3(kinds[i]); + stubh3.GetCode(isolate)->set_is_pregenerated(true); + } +} + + void ArrayConstructorStub::Generate(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r0 : argc (only if argument_count_ == ANY) @@ -7436,6 +7394,105 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { } +void InternalArrayConstructorStub::GenerateCase( + MacroAssembler* masm, ElementsKind kind) { + Label not_zero_case, not_one_case; + Label normal_sequence; + + __ tst(r0, r0); + __ b(ne, ¬_zero_case); + InternalArrayNoArgumentConstructorStub stub0(kind); + __ TailCallStub(&stub0); + + __ bind(¬_zero_case); + __ cmp(r0, Operand(1)); + __ b(gt, ¬_one_case); + + if (IsFastPackedElementsKind(kind)) { + // We might need to create a holey array + // look at the first argument + __ ldr(r3, MemOperand(sp, 0)); + __ cmp(r3, Operand::Zero()); + __ b(eq, &normal_sequence); + + InternalArraySingleArgumentConstructorStub + stub1_holey(GetHoleyElementsKind(kind)); + __ TailCallStub(&stub1_holey); + } + + __ bind(&normal_sequence); + InternalArraySingleArgumentConstructorStub stub1(kind); + __ TailCallStub(&stub1); + + __ bind(¬_one_case); + InternalArrayNArgumentsConstructorStub stubN(kind); + __ TailCallStub(&stubN); +} + + +void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- r0 : argc + // -- r1 : constructor + // -- sp[0] : return address + // -- sp[4] : last argument + // ----------------------------------- + + if (FLAG_debug_code) { + // The array construct code is only set for the global and natives + // builtin Array functions which always have maps. + + // Initial map for the builtin Array function should be a map. + __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); + // Will both indicate a NULL and a Smi. 
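The debug check that follows leans on a tagging detail worth spelling out:

    // kSmiTag == 0, so (value & kSmiTagMask) == 0 both for a NULL slot and
    // for a Smi, while a tagged heap pointer has its low bit set. A single
    // 'tst r3, kSmiTagMask' plus an 'ne' assert therefore rules out both
    // bad cases before the CompareObjectType(MAP_TYPE) check runs.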
+ __ tst(r3, Operand(kSmiTagMask)); + __ Assert(ne, "Unexpected initial map for Array function"); + __ CompareObjectType(r3, r3, r4, MAP_TYPE); + __ Assert(eq, "Unexpected initial map for Array function"); + } + + if (FLAG_optimize_constructed_arrays) { + // Figure out the right elements kind + __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); + + // Load the map's "bit field 2" into |result|. We only need the first byte, + // but the following bit field extraction takes care of that anyway. + __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset)); + // Retrieve elements_kind from bit field 2. + __ Ubfx(r3, r3, Map::kElementsKindShift, Map::kElementsKindBitCount); + + if (FLAG_debug_code) { + Label done; + __ cmp(r3, Operand(FAST_ELEMENTS)); + __ b(eq, &done); + __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS)); + __ Assert(eq, + "Invalid ElementsKind for InternalArray or InternalPackedArray"); + __ bind(&done); + } + + Label fast_elements_case; + __ cmp(r3, Operand(FAST_ELEMENTS)); + __ b(eq, &fast_elements_case); + GenerateCase(masm, FAST_HOLEY_ELEMENTS); + + __ bind(&fast_elements_case); + GenerateCase(masm, FAST_ELEMENTS); + } else { + Label generic_constructor; + // Run the native code for the Array function called as constructor. + ArrayNativeCode(masm, &generic_constructor); + + // Jump to the generic construct code in case the specialized code cannot + // handle the construction. + __ bind(&generic_constructor); + Handle generic_construct_stub = + masm->isolate()->builtins()->JSConstructStubGeneric(); + __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); + } +} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 7bf253a..5b2980a 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -504,50 +504,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, } -void SeqStringSetCharGenerator::Generate(MacroAssembler* masm, - String::Encoding encoding, - Register string, - Register index, - Register value) { - if (FLAG_debug_code) { - __ SmiTst(index); - __ Check(eq, "Non-smi index"); - __ SmiTst(value); - __ Check(eq, "Non-smi value"); - - __ ldr(ip, FieldMemOperand(string, String::kLengthOffset)); - __ cmp(index, ip); - __ Check(lt, "Index is too large"); - - __ cmp(index, Operand(Smi::FromInt(0))); - __ Check(ge, "Index is negative"); - - __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset)); - __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); - - __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask)); - static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; - static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING - ? one_byte_seq_type : two_byte_seq_type)); - __ Check(eq, "Unexpected string type"); - } - - __ add(ip, - string, - Operand(SeqString::kHeaderSize - kHeapObjectTag)); - __ SmiUntag(value, value); - STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); - if (encoding == String::ONE_BYTE_ENCODING) { - // Smis are tagged by left shift by 1, thus LSR by 1 to smi-untag inline. - __ strb(value, MemOperand(ip, index, LSR, kSmiTagSize)); - } else { - // No need to untag a smi for two-byte addressing. - __ strh(value, MemOperand(ip, index)); // LSL(1 - kSmiTagSize). 
- } -} - - static MemOperand ExpConstant(int index, Register base) { return MemOperand(base, index * kDoubleSize); } diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index 75899a9..c020ab6 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -51,7 +51,7 @@ class CodeGenerator: public AstVisitor { static bool MakeCode(CompilationInfo* info); // Printing of AST, etc. as requested by flags. - static void MakeCodePrologue(CompilationInfo* info); + static void MakeCodePrologue(CompilationInfo* info, const char* kind); // Allocate and install the code. static Handle MakeCodeEpilogue(MacroAssembler* masm, diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index d973889..ea3287a 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -48,7 +48,7 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList( JSFunction* function) { Isolate* isolate = function->GetIsolate(); HandleScope scope(isolate); - AssertNoAllocation no_allocation; + DisallowHeapAllocation no_allocation; ASSERT(function->IsOptimized()); ASSERT(function->FunctionsInFunctionListShareSameCode()); diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 33a499c..8b24bf1 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -678,8 +678,8 @@ void FullCodeGenerator::DoTest(Expression* condition, Label* if_true, Label* if_false, Label* fall_through) { - ToBooleanStub stub(result_register()); - __ CallStub(&stub, condition->test_id()); + Handle ic = ToBooleanStub::GetUninitialized(isolate()); + CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id()); __ tst(result_register(), result_register()); Split(ne, if_true, if_false, fall_through); } @@ -1081,9 +1081,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { ForIn loop_statement(this, stmt); increment_loop_depth(); - // Get the object to enumerate over. Both SpiderMonkey and JSC - // ignore null and undefined in contrast to the specification; see - // ECMA-262 section 12.6.4. + // Get the object to enumerate over. If the object is null or undefined, skip + // over the loop. See ECMA-262 version 5, section 12.6.4. VisitForAccumulatorValue(stmt->enumerable()); __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); __ cmp(r0, ip); @@ -1259,6 +1258,65 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { } +void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) { + Comment cmnt(masm_, "[ ForOfStatement"); + SetStatementPosition(stmt); + + Iteration loop_statement(this, stmt); + increment_loop_depth(); + + // var iterator = iterable[@@iterator]() + VisitForAccumulatorValue(stmt->assign_iterator()); + + // As with for-in, skip the loop if the iterator is null or undefined. + __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); + __ b(eq, loop_statement.break_label()); + __ CompareRoot(r0, Heap::kNullValueRootIndex); + __ b(eq, loop_statement.break_label()); + + // Convert the iterator to a JS object. + Label convert, done_convert; + __ JumpIfSmi(r0, &convert); + __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE); + __ b(ge, &done_convert); + __ bind(&convert); + __ push(r0); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); + __ bind(&done_convert); + __ push(r0); + + // Loop entry. 
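The VisitForOfStatement that begins above and continues below lowers the new for-of statement to the plain iterator protocol; roughly, in source terms (a sketch of the desugaring, not the parser's exact AST):

    // for (each of iterable) body   ==>
    //   var iterator = iterable[@@iterator]();   // assign_iterator
    //   while (true) {
    //     var result = iterator.next();          // next_result
    //     if (result.done) break;                // result_done
    //     each = result.value;                   // assign_each
    //     body
    //   }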
+ __ bind(loop_statement.continue_label()); + + // result = iterator.next() + VisitForEffect(stmt->next_result()); + + // if (result.done) break; + Label result_not_done; + VisitForControl(stmt->result_done(), + loop_statement.break_label(), + &result_not_done, + &result_not_done); + __ bind(&result_not_done); + + // each = result.value + VisitForEffect(stmt->assign_each()); + + // Generate code for the body of the loop. + Visit(stmt->body()); + + // Check stack before looping. + PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS); + EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label()); + __ jmp(loop_statement.continue_label()); + + // Exit and decrement the loop depth. + PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); + __ bind(loop_statement.break_label()); + decrement_loop_depth(); +} + + void FullCodeGenerator::EmitNewClosure(Handle info, bool pretenure) { // Use the fast case closure allocation code that allocates in new @@ -1971,10 +2029,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) { // [sp + 1 * kPointerSize] iter // [sp + 0 * kPointerSize] g - Label l_catch, l_try, l_resume, l_send, l_call, l_loop; + Label l_catch, l_try, l_resume, l_next, l_call, l_loop; // Initial send value is undefined. __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); - __ b(&l_send); + __ b(&l_next); // catch (e) { receiver = iter; f = iter.throw; arg = e; goto l_call; } __ bind(&l_catch); @@ -1983,11 +2041,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) { __ push(r3); // iter __ push(r0); // exception __ mov(r0, r3); // iter - __ push(r0); // push LoadIC state __ LoadRoot(r2, Heap::kthrow_stringRootIndex); // "throw" Handle throw_ic = isolate()->builtins()->LoadIC_Initialize(); CallIC(throw_ic); // iter.throw in r0 - __ add(sp, sp, Operand(kPointerSize)); // drop LoadIC state __ jmp(&l_call); // try { received = yield result.value } @@ -2007,17 +2063,15 @@ void FullCodeGenerator::VisitYield(Yield* expr) { __ bind(&l_resume); // received in r0 __ PopTryHandler(); - // receiver = iter; f = iter.send; arg = received; - __ bind(&l_send); + // receiver = iter; f = iter.next; arg = received; + __ bind(&l_next); __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter __ push(r3); // iter __ push(r0); // received __ mov(r0, r3); // iter - __ push(r0); // push LoadIC state - __ LoadRoot(r2, Heap::ksend_stringRootIndex); // "send" - Handle send_ic = isolate()->builtins()->LoadIC_Initialize(); - CallIC(send_ic); // iter.send in r0 - __ add(sp, sp, Operand(kPointerSize)); // drop LoadIC state + __ LoadRoot(r2, Heap::knext_stringRootIndex); // "next" + Handle next_ic = isolate()->builtins()->LoadIC_Initialize(); + CallIC(next_ic); // iter.next in r0 // result = f.call(receiver, arg); __ bind(&l_call); @@ -2045,13 +2099,11 @@ void FullCodeGenerator::VisitYield(Yield* expr) { __ pop(r1); // result __ push(r0); // result.value __ mov(r0, r1); // result - __ push(r0); // push LoadIC state __ LoadRoot(r2, Heap::kdone_stringRootIndex); // "done" Handle done_ic = isolate()->builtins()->LoadIC_Initialize(); CallIC(done_ic); // result.done in r0 - __ add(sp, sp, Operand(kPointerSize)); // drop LoadIC state - ToBooleanStub stub(r0); - __ CallStub(&stub); + Handle bool_ic = ToBooleanStub::GetUninitialized(isolate()); + CallIC(bool_ic); __ cmp(r0, Operand(0)); __ b(eq, &l_try); @@ -2122,7 +2174,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator, // If we are sending a value and there is no operand stack, we can jump back // in directly. 
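The send to next renames in VisitYield and EmitGeneratorResume track the iterator protocol change from iter.send(v) to iter.next(v); the resume fast path immediately below is guarded accordingly:

    // Only a NEXT resume (formerly SEND) of a generator suspended with an
    // empty operand stack may jump straight back into its body; a THROW
    // resume, or a frame with pending operands, takes the slow path.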
- if (resume_mode == JSGeneratorObject::SEND) { + if (resume_mode == JSGeneratorObject::NEXT) { Label slow_resume; __ cmp(r3, Operand(0)); __ b(ne, &slow_resume); @@ -3013,7 +3065,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( // string "valueOf" the result is false. // The use of ip to store the valueOf string assumes that it is not otherwise // used in the loop below. - __ mov(ip, Operand(FACTORY->value_of_string())); + __ mov(ip, Operand(isolate()->factory()->value_of_string())); __ jmp(&entry); __ bind(&loop); __ ldr(r3, MemOperand(r4, 0)); @@ -3425,19 +3477,56 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) { } +void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string, + Register index, + Register value, + uint32_t encoding_mask) { + __ SmiTst(index); + __ Check(eq, "Non-smi index"); + __ SmiTst(value); + __ Check(eq, "Non-smi value"); + + __ ldr(ip, FieldMemOperand(string, String::kLengthOffset)); + __ cmp(index, ip); + __ Check(lt, "Index is too large"); + + __ cmp(index, Operand(Smi::FromInt(0))); + __ Check(ge, "Index is negative"); + + __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset)); + __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); + + __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask)); + __ cmp(ip, Operand(encoding_mask)); + __ Check(eq, "Unexpected string type"); +} + + void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) { ZoneList* args = expr->arguments(); ASSERT_EQ(3, args->length()); + Register string = r0; + Register index = r1; + Register value = r2; + VisitForStackValue(args->at(1)); // index VisitForStackValue(args->at(2)); // value - __ pop(r2); - __ pop(r1); + __ pop(value); + __ pop(index); VisitForAccumulatorValue(args->at(0)); // string - static const String::Encoding encoding = String::ONE_BYTE_ENCODING; - SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2); - context()->Plug(r0); + if (FLAG_debug_code) { + static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; + EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type); + } + + __ SmiUntag(value, value); + __ add(ip, + string, + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + __ strb(value, MemOperand(ip, index, LSR, kSmiTagSize)); + context()->Plug(string); } @@ -3445,15 +3534,28 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) { ZoneList* args = expr->arguments(); ASSERT_EQ(3, args->length()); + Register string = r0; + Register index = r1; + Register value = r2; + VisitForStackValue(args->at(1)); // index VisitForStackValue(args->at(2)); // value - __ pop(r2); - __ pop(r1); + __ pop(value); + __ pop(index); VisitForAccumulatorValue(args->at(0)); // string - static const String::Encoding encoding = String::TWO_BYTE_ENCODING; - SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2); - context()->Plug(r0); + if (FLAG_debug_code) { + static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; + EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type); + } + + __ SmiUntag(value, value); + __ add(ip, + string, + Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); + __ strh(value, MemOperand(ip, index)); + context()->Plug(string); } @@ -4663,9 +4765,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, VisitForAccumulatorValue(sub_expr); PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); - EqualityKind 
kind = expr->op() == Token::EQ_STRICT - ? kStrictEquality : kNonStrictEquality; - if (kind == kStrictEquality) { + if (expr->op() == Token::EQ_STRICT) { Heap::RootListIndex nil_value = nil == kNullValue ? Heap::kNullValueRootIndex : Heap::kUndefinedValueRootIndex; @@ -4673,9 +4773,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, __ cmp(r0, r1); Split(eq, if_true, if_false, fall_through); } else { - Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), - kNonStrictEquality, - nil); + Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil); CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId()); __ cmp(r0, Operand(0)); Split(ne, if_true, if_false, fall_through); diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index 14c4794..87865b2 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -646,15 +646,11 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) { } -// Defined in ic.cc. -Object* LoadIC_Miss(Arguments args); - void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r2 : name // -- lr : return address // -- r0 : receiver - // -- sp[0] : receiver // ----------------------------------- // Probe the stub cache. @@ -674,7 +670,6 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) { // -- r2 : name // -- lr : return address // -- r0 : receiver - // -- sp[0] : receiver // ----------------------------------- Label miss; @@ -695,7 +690,6 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) { // -- r2 : name // -- lr : return address // -- r0 : receiver - // -- sp[0] : receiver // ----------------------------------- Isolate* isolate = masm->isolate(); @@ -711,6 +705,20 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) { } +void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { + // ---------- S t a t e -------------- + // -- r2 : name + // -- lr : return address + // -- r0 : receiver + // ----------------------------------- + + __ mov(r3, r0); + __ Push(r3, r2); + + __ TailCallRuntime(Runtime::kGetProperty, 2, 1); +} + + static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm, Register object, Register key, @@ -878,9 +886,6 @@ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm, } -Object* KeyedLoadIC_Miss(Arguments args); - - void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) { // ---------- S t a t e -------------- // -- lr : return address diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index e1bb69e..fbb9c6e 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -369,8 +369,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { void LStoreNamedField::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); - stream->Add("."); - stream->Add(*String::cast(*name())->ToCString()); + hydrogen()->access().PrintTo(stream); stream->Add(" <- "); value()->PrintTo(stream); } @@ -406,7 +405,14 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) { } else { stream->Add("] <- "); } - value()->PrintTo(stream); + + if (value() == NULL) { + ASSERT(hydrogen()->IsConstantHoleStore() && + hydrogen()->value()->representation().IsDouble()); + stream->Add("<the hole>"); + } else { + value()->PrintTo(stream); + } } @@ -699,6 +705,12 @@ LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) { } +LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) { + UNREACHABLE(); + return NULL; +} + + LInstruction*
LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) { return AssignEnvironment(new(zone()) LDeoptimize); } @@ -711,9 +723,9 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) { LInstruction* LChunkBuilder::DoShift(Token::Value op, HBitwiseBinaryOperation* instr) { - if (instr->representation().IsTagged()) { - ASSERT(instr->left()->representation().IsTagged()); - ASSERT(instr->right()->representation().IsTagged()); + if (instr->representation().IsSmiOrTagged()) { + ASSERT(instr->left()->representation().IsSmiOrTagged()); + ASSERT(instr->right()->representation().IsSmiOrTagged()); LOperand* left = UseFixed(instr->left(), r1); LOperand* right = UseFixed(instr->right(), r0); @@ -781,8 +793,8 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op, op == Token::SUB); HValue* left = instr->left(); HValue* right = instr->right(); - ASSERT(left->representation().IsTagged()); - ASSERT(right->representation().IsTagged()); + ASSERT(left->representation().IsSmiOrTagged()); + ASSERT(right->representation().IsSmiOrTagged()); LOperand* left_operand = UseFixed(left, r1); LOperand* right_operand = UseFixed(right, r0); LArithmeticT* result = @@ -1304,9 +1316,9 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); return DefineAsRegister(new(zone()) LBitI(left, right)); } else { - ASSERT(instr->representation().IsTagged()); - ASSERT(instr->left()->representation().IsTagged()); - ASSERT(instr->right()->representation().IsTagged()); + ASSERT(instr->representation().IsSmiOrTagged()); + ASSERT(instr->left()->representation().IsSmiOrTagged()); + ASSERT(instr->right()->representation().IsSmiOrTagged()); LOperand* left = UseFixed(instr->left(), r1); LOperand* right = UseFixed(instr->right(), r0); @@ -1333,18 +1345,14 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero)); LOperand* value = UseRegisterAtStart(instr->left()); LDivI* div = - new(zone()) LDivI(value, UseOrConstant(instr->right())); + new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL); return AssignEnvironment(DefineSameAsFirst(div)); } - // TODO(1042) The fixed register allocation - // is needed because we call TypeRecordingBinaryOpStub from - // the generated code, which requires registers r0 - // and r1 to be used. We should remove that - // when we provide a native implementation. - LOperand* dividend = UseFixed(instr->left(), r0); - LOperand* divisor = UseFixed(instr->right(), r1); - return AssignEnvironment(AssignPointerMap( - DefineFixed(new(zone()) LDivI(dividend, divisor), r0))); + LOperand* dividend = UseRegister(instr->left()); + LOperand* divisor = UseRegister(instr->right()); + LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? 
NULL : FixedTemp(d4); + LDivI* div = new(zone()) LDivI(dividend, divisor, temp); + return AssignEnvironment(DefineAsRegister(div)); } else { return DoArithmeticT(Token::DIV, instr); } @@ -1434,43 +1442,61 @@ LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { LInstruction* LChunkBuilder::DoMod(HMod* instr) { + HValue* left = instr->left(); + HValue* right = instr->right(); if (instr->representation().IsInteger32()) { - ASSERT(instr->left()->representation().IsInteger32()); - ASSERT(instr->right()->representation().IsInteger32()); - - LModI* mod; + ASSERT(left->representation().IsInteger32()); + ASSERT(right->representation().IsInteger32()); if (instr->HasPowerOf2Divisor()) { - ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero)); - LOperand* value = UseRegisterAtStart(instr->left()); - mod = new(zone()) LModI(value, UseOrConstant(instr->right())); - } else { - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegister(instr->right()); - mod = new(zone()) LModI(dividend, - divisor, - TempRegister(), - FixedTemp(d10), - FixedTemp(d11)); - } - - if (instr->CheckFlag(HValue::kBailoutOnMinusZero) || - instr->CheckFlag(HValue::kCanBeDivByZero) || - instr->CheckFlag(HValue::kCanOverflow)) { + ASSERT(!right->CanBeZero()); + LModI* mod = new(zone()) LModI(UseRegisterAtStart(left), + UseOrConstant(right)); + LInstruction* result = DefineAsRegister(mod); + return (left->CanBeNegative() && + instr->CheckFlag(HValue::kBailoutOnMinusZero)) + ? AssignEnvironment(result) + : result; + } else if (instr->has_fixed_right_arg()) { + LModI* mod = new(zone()) LModI(UseRegisterAtStart(left), + UseRegisterAtStart(right)); return AssignEnvironment(DefineAsRegister(mod)); + } else if (CpuFeatures::IsSupported(SUDIV)) { + LModI* mod = new(zone()) LModI(UseRegister(left), + UseRegister(right)); + LInstruction* result = DefineAsRegister(mod); + return (right->CanBeZero() || + (left->RangeCanInclude(kMinInt) && + right->RangeCanInclude(-1) && + instr->CheckFlag(HValue::kBailoutOnMinusZero)) || + (left->CanBeNegative() && + instr->CanBeZero() && + instr->CheckFlag(HValue::kBailoutOnMinusZero))) + ? AssignEnvironment(result) + : result; } else { - return DefineAsRegister(mod); + LModI* mod = new(zone()) LModI(UseRegister(left), + UseRegister(right), + FixedTemp(d10), + FixedTemp(d11)); + LInstruction* result = DefineAsRegister(mod); + return (right->CanBeZero() || + (left->CanBeNegative() && + instr->CanBeZero() && + instr->CheckFlag(HValue::kBailoutOnMinusZero))) + ? AssignEnvironment(result) + : result; } - } else if (instr->representation().IsTagged()) { + } else if (instr->representation().IsSmiOrTagged()) { return DoArithmeticT(Token::MOD, instr); } else { ASSERT(instr->representation().IsDouble()); - // We call a C function for double modulo. It can't trigger a GC. - // We need to use fixed result register for the call. + // We call a C function for double modulo. It can't trigger a GC. We need + // to use fixed result register for the call. // TODO(fschneider): Allow any register as input registers. 
- LOperand* left = UseFixedDouble(instr->left(), d1); - LOperand* right = UseFixedDouble(instr->right(), d2); - LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right); - return MarkAsCall(DefineFixedDouble(result, d1), instr); + LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD, + UseFixedDouble(left, d1), + UseFixedDouble(right, d2)); + return MarkAsCall(DefineFixedDouble(mod, d1), instr); } } @@ -1618,7 +1644,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { return DoArithmeticD(Token::ADD, instr); } else { - ASSERT(instr->representation().IsTagged()); + ASSERT(instr->representation().IsSmiOrTagged()); return DoArithmeticT(Token::ADD, instr); } } @@ -1682,9 +1708,10 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { LInstruction* LChunkBuilder::DoCompareIDAndBranch( HCompareIDAndBranch* instr) { Representation r = instr->representation(); - if (r.IsInteger32()) { - ASSERT(instr->left()->representation().IsInteger32()); - ASSERT(instr->right()->representation().IsInteger32()); + if (r.IsSmiOrInteger32()) { + ASSERT(instr->left()->representation().IsSmiOrInteger32()); + ASSERT(instr->left()->representation().Equals( + instr->right()->representation())); LOperand* left = UseRegisterOrConstantAtStart(instr->left()); LOperand* right = UseRegisterOrConstantAtStart(instr->right()); return new(zone()) LCmpIDAndBranch(left, right); @@ -1887,12 +1914,26 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) { LInstruction* LChunkBuilder::DoChange(HChange* instr) { Representation from = instr->from(); Representation to = instr->to(); + if (from.IsSmi()) { + if (to.IsTagged()) { + LOperand* value = UseRegister(instr->value()); + return DefineSameAsFirst(new(zone()) LDummyUse(value)); + } + from = Representation::Tagged(); + } if (from.IsTagged()) { if (to.IsDouble()) { info()->MarkAsDeferredCalling(); LOperand* value = UseRegister(instr->value()); LNumberUntagD* res = new(zone()) LNumberUntagD(value); return AssignEnvironment(DefineAsRegister(res)); + } else if (to.IsSmi()) { + HValue* val = instr->value(); + LOperand* value = UseRegister(val); + if (val->type().IsSmi()) { + return DefineSameAsFirst(new(zone()) LDummyUse(value)); + } + return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value))); } else { ASSERT(to.IsInteger32()); LOperand* value = NULL; @@ -1927,6 +1968,10 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2); Define(result, result_temp); return AssignPointerMap(result); + } else if (to.IsSmi()) { + LOperand* value = UseRegister(instr->value()); + return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToSmi(value, + TempRegister(), TempRegister()))); } else { ASSERT(to.IsInteger32()); LOperand* value = UseRegister(instr->value()); @@ -1949,6 +1994,15 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { LNumberTagI* result = new(zone()) LNumberTagI(value); return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); } + } else if (to.IsSmi()) { + HValue* val = instr->value(); + LOperand* value = UseRegister(val); + LInstruction* result = + DefineSameAsFirst(new(zone()) LInteger32ToSmi(value)); + if (val->HasRange() && val->range()->IsInSmiRange()) { + return result; + } + return AssignEnvironment(result); } else { ASSERT(to.IsDouble()); if (instr->value()->CheckFlag(HInstruction::kUint32)) { @@ -1986,18 +2040,6 @@ LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) { } 
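// The smi conversions wired up above lean on V8's 32-bit smi encoding
// (tag bit 0, payload shifted left by one); roughly:
//
//   tagged = value << 1;   // Integer32ToSmi, deopts on signed overflow
//   value  = tagged >> 1;  // untagging is an arithmetic shift right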
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckSmi(value)); -} - - -LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) { - LOperand* value = UseRegisterAtStart(instr->value()); - return AssignEnvironment(new(zone()) LCheckSmi(value)); -} - - LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) { LOperand* value = UseRegisterAtStart(instr->value()); return AssignEnvironment(new(zone()) LCheckFunction(value)); @@ -2020,7 +2062,7 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) { } else if (input_rep.IsInteger32()) { return DefineAsRegister(new(zone()) LClampIToUint8(reg)); } else { - ASSERT(input_rep.IsTagged()); + ASSERT(input_rep.IsSmiOrTagged()); // Register allocator doesn't (yet) support allocation of double // temps. Reserve d1 explicitly. LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(d11)); @@ -2038,7 +2080,9 @@ LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { LInstruction* LChunkBuilder::DoConstant(HConstant* instr) { Representation r = instr->representation(); - if (r.IsInteger32()) { + if (r.IsSmi()) { + return DefineAsRegister(new(zone()) LConstantS); + } else if (r.IsInteger32()) { return DefineAsRegister(new(zone()) LConstantI); } else if (r.IsDouble()) { return DefineAsRegister(new(zone()) LConstantD); @@ -2154,7 +2198,7 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer( LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); + instr->key()->representation().IsSmi()); ElementsKind elements_kind = instr->elements_kind(); LOperand* key = UseRegisterOrConstantAtStart(instr->key()); LLoadKeyed* result = NULL; @@ -2164,7 +2208,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { if (instr->representation().IsDouble()) { obj = UseTempRegister(instr->elements()); } else { - ASSERT(instr->representation().IsTagged()); + ASSERT(instr->representation().IsSmiOrTagged()); obj = UseRegisterAtStart(instr->elements()); } result = new(zone()) LLoadKeyed(obj, key); @@ -2214,7 +2258,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { val = UseTempRegister(instr->value()); key = UseRegisterOrConstantAtStart(instr->key()); } else { - ASSERT(instr->value()->representation().IsTagged()); + ASSERT(instr->value()->representation().IsSmiOrTagged()); object = UseTempRegister(instr->elements()); val = needs_write_barrier ? UseTempRegister(instr->value()) : UseRegisterAtStart(instr->value()); @@ -2293,13 +2337,14 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento( LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { + bool is_in_object = instr->access().IsInobject(); bool needs_write_barrier = instr->NeedsWriteBarrier(); bool needs_write_barrier_for_map = !instr->transition().is_null() && instr->NeedsWriteBarrierForMap(); LOperand* obj; if (needs_write_barrier) { - obj = instr->is_in_object() + obj = is_in_object ? UseRegister(instr->object()) : UseTempRegister(instr->object()); } else { @@ -2323,10 +2368,11 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { LOperand* temp = needs_write_barrier_for_map ? 
TempRegister() : NULL; LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp); - if ((FLAG_track_fields && instr->field_representation().IsSmi()) || - (FLAG_track_heap_object_fields && - instr->field_representation().IsHeapObject())) { - return AssignEnvironment(result); + if (FLAG_track_heap_object_fields && + instr->field_representation().IsHeapObject()) { + if (!instr->value()->type().IsHeapObject()) { + return AssignEnvironment(result); + } } return result; } @@ -2370,14 +2416,6 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) { } -LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) { - info()->MarkAsDeferredCalling(); - LAllocateObject* result = - new(zone()) LAllocateObject(TempRegister(), TempRegister()); - return AssignPointerMap(DefineAsRegister(result)); -} - - LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { info()->MarkAsDeferredCalling(); LOperand* size = instr->size()->IsConstant() @@ -2467,7 +2505,7 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { index = UseOrConstant(instr->index()); } else { length = UseTempRegister(instr->length()); - index = Use(instr->index()); + index = UseRegisterAtStart(instr->index()); } return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index)); } diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index 9bcd44a..ccfd0db 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -49,7 +49,6 @@ class LCodeGen; #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ V(AccessArgumentsAt) \ V(AddI) \ - V(AllocateObject) \ V(Allocate) \ V(ApplyArguments) \ V(ArgumentsElements) \ @@ -87,6 +86,7 @@ class LCodeGen; V(CmpT) \ V(ConstantD) \ V(ConstantI) \ + V(ConstantS) \ V(ConstantT) \ V(Context) \ V(DebugBreak) \ @@ -95,6 +95,7 @@ class LCodeGen; V(Deoptimize) \ V(DivI) \ V(DoubleToI) \ + V(DoubleToSmi) \ V(DummyUse) \ V(ElementsKind) \ V(FixedArrayBaseLength) \ @@ -111,6 +112,7 @@ class LCodeGen; V(InstanceSize) \ V(InstructionGap) \ V(Integer32ToDouble) \ + V(Integer32ToSmi) \ V(Uint32ToDouble) \ V(InvokeFunction) \ V(IsConstructCallAndBranch) \ @@ -573,51 +575,39 @@ class LArgumentsElements: public LTemplateInstruction<1, 0, 0> { }; -class LModI: public LTemplateInstruction<1, 2, 3> { +class LModI: public LTemplateInstruction<1, 2, 2> { public: - // Used when the right hand is a constant power of 2. - LModI(LOperand* left, - LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - temps_[0] = NULL; - temps_[1] = NULL; - temps_[2] = NULL; - } - - // Used for the standard case. 
LModI(LOperand* left, LOperand* right, - LOperand* temp, - LOperand* temp2, - LOperand* temp3) { + LOperand* temp = NULL, + LOperand* temp2 = NULL) { inputs_[0] = left; inputs_[1] = right; temps_[0] = temp; temps_[1] = temp2; - temps_[2] = temp3; } LOperand* left() { return inputs_[0]; } LOperand* right() { return inputs_[1]; } LOperand* temp() { return temps_[0]; } LOperand* temp2() { return temps_[1]; } - LOperand* temp3() { return temps_[2]; } DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i") DECLARE_HYDROGEN_ACCESSOR(Mod) }; -class LDivI: public LTemplateInstruction<1, 2, 0> { +class LDivI: public LTemplateInstruction<1, 2, 1> { public: - LDivI(LOperand* left, LOperand* right) { + LDivI(LOperand* left, LOperand* right, LOperand* temp) { inputs_[0] = left; inputs_[1] = right; + temps_[0] = temp; } LOperand* left() { return inputs_[0]; } LOperand* right() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") DECLARE_HYDROGEN_ACCESSOR(Div) @@ -1204,6 +1194,15 @@ class LConstantI: public LTemplateInstruction<1, 0, 0> { }; +class LConstantS: public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s") + DECLARE_HYDROGEN_ACCESSOR(Constant) + + Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); } +}; + + class LConstantD: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d") @@ -1954,6 +1953,19 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> { }; +class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> { + public: + explicit LInteger32ToSmi(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi") + DECLARE_HYDROGEN_ACCESSOR(Change) +}; + + class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> { public: explicit LUint32ToDouble(LOperand* value) { @@ -2007,6 +2019,25 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> { }; +class LDoubleToSmi: public LTemplateInstruction<1, 1, 2> { + public: + LDoubleToSmi(LOperand* value, LOperand* temp, LOperand* temp2) { + inputs_[0] = value; + temps_[0] = temp; + temps_[1] = temp2; + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi") + DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) + + bool truncating() { return hydrogen()->CanTruncateToInt32(); } +}; + + // Sometimes truncating conversion from a tagged value to an int32. 
class LDoubleToI: public LTemplateInstruction<1, 1, 2> { public: @@ -2111,9 +2142,6 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> { virtual void PrintDataTo(StringStream* stream); - Handle<Object> name() const { return hydrogen()->name(); } - bool is_in_object() { return hydrogen()->is_in_object(); } - int offset() { return hydrogen()->offset(); } Handle<Map> transition() const { return hydrogen()->transition(); } Representation representation() const { return hydrogen()->field_representation(); @@ -2352,7 +2380,7 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> { }; -class LCheckSmi: public LTemplateInstruction<0, 1, 0> { +class LCheckSmi: public LTemplateInstruction<1, 1, 0> { public: explicit LCheckSmi(LOperand* value) { inputs_[0] = value; @@ -2416,21 +2444,6 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> { }; -class LAllocateObject: public LTemplateInstruction<1, 1, 2> { - public: - LAllocateObject(LOperand* temp, LOperand* temp2) { - temps_[0] = temp; - temps_[1] = temp2; - } - - LOperand* temp() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object") - DECLARE_HYDROGEN_ACCESSOR(AllocateObject) -}; - - class LAllocate: public LTemplateInstruction<1, 2, 2> { public: LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) { diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index 09a0e9c..96befb0 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -181,6 +181,7 @@ bool LCodeGen::GeneratePrologue() { __ add(fp, sp, Operand(2 * kPointerSize)); } frame_is_built_ = true; + info_->AddNoFrameRange(0, masm_->pc_offset()); } // Reserve space for the stack slots needed by the code.
@@ -518,13 +519,18 @@ DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const { HConstant* constant = chunk_->LookupConstant(op); - ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged()); + ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged()); return constant->handle(); } bool LCodeGen::IsInteger32(LConstantOperand* op) const { - return chunk_->LookupLiteralRepresentation(op).IsInteger32(); + return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); +} + + +bool LCodeGen::IsSmi(LConstantOperand* op) const { + return chunk_->LookupLiteralRepresentation(op).IsSmi(); } @@ -534,6 +540,12 @@ int LCodeGen::ToInteger32(LConstantOperand* op) const { } +Smi* LCodeGen::ToSmi(LConstantOperand* op) const { + HConstant* constant = chunk_->LookupConstant(op); + return Smi::FromInt(constant->Integer32Value()); +} + + double LCodeGen::ToDouble(LConstantOperand* op) const { HConstant* constant = chunk_->LookupConstant(op); ASSERT(constant->HasDoubleValue()); @@ -935,8 +947,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { Handle<FixedArray> literals = factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); - { ALLOW_HANDLE_DEREF(isolate(), - "copying a ZoneList of handles into a FixedArray"); + { AllowDeferredHandleDereference copy_handles; for (int i = 0; i < deoptimization_literals_.length(); i++) { literals->set(i, *deoptimization_literals_[i]); } @@ -1154,122 +1165,150 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { void LCodeGen::DoModI(LModI* instr) { - if (instr->hydrogen()->HasPowerOf2Divisor()) { - Register dividend = ToRegister(instr->left()); - Register result = ToRegister(instr->result()); + HMod* hmod = instr->hydrogen(); + HValue* left = hmod->left(); + HValue* right = hmod->right(); + if (hmod->HasPowerOf2Divisor()) { + // TODO(svenpanne) We should really do the strength reduction on the + // Hydrogen level. + Register left_reg = ToRegister(instr->left()); + Register result_reg = ToRegister(instr->result()); + + // Note: The code below even works when right contains kMinInt. + int32_t divisor = Abs(right->GetInteger32Constant()); + + Label left_is_not_negative, done; + if (left->CanBeNegative()) { + __ cmp(left_reg, Operand::Zero()); + __ b(pl, &left_is_not_negative); + __ rsb(result_reg, left_reg, Operand::Zero()); + __ and_(result_reg, result_reg, Operand(divisor - 1)); + __ rsb(result_reg, result_reg, Operand::Zero(), SetCC); + if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { + DeoptimizeIf(eq, instr->environment()); + } + __ b(&done); + } - int32_t divisor = HConstant::cast(instr->hydrogen()->right())->Integer32Value(); + __ bind(&left_is_not_negative); + __ and_(result_reg, left_reg, Operand(divisor - 1)); + __ bind(&done); - if (divisor < 0) divisor = -divisor; + } else if (hmod->has_fixed_right_arg()) { + Register left_reg = ToRegister(instr->left()); + Register right_reg = ToRegister(instr->right()); + Register result_reg = ToRegister(instr->result()); - Label positive_dividend, done; - __ cmp(dividend, Operand::Zero()); - __ b(pl, &positive_dividend); - __ rsb(result, dividend, Operand::Zero()); - __ and_(result, result, Operand(divisor - 1), SetCC); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(eq, instr->environment()); + int32_t divisor = hmod->fixed_right_arg_value(); + ASSERT(IsPowerOf2(divisor)); + + // Check if our assumption of a fixed right operand still holds.
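// With 'divisor' being the recorded power-of-two constant, the guard and
// the strength-reduced remainder emitted below amount to, in pseudocode:
//
//   if (right != divisor) deoptimize();
//   result = (left >= 0) ?  (left & (divisor - 1))
//                        : -(-left & (divisor - 1));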
+ __ cmp(right_reg, Operand(divisor)); + DeoptimizeIf(ne, instr->environment()); + + Label left_is_not_negative, done; + if (left->CanBeNegative()) { + __ cmp(left_reg, Operand::Zero()); + __ b(pl, &left_is_not_negative); + __ rsb(result_reg, left_reg, Operand::Zero()); + __ and_(result_reg, result_reg, Operand(divisor - 1)); + __ rsb(result_reg, result_reg, Operand::Zero(), SetCC); + if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { + DeoptimizeIf(eq, instr->environment()); + } + __ b(&done); } - __ rsb(result, result, Operand::Zero()); - __ b(&done); - __ bind(&positive_dividend); - __ and_(result, dividend, Operand(divisor - 1)); + + __ bind(&left_is_not_negative); + __ and_(result_reg, left_reg, Operand(divisor - 1)); __ bind(&done); - return; - } - // These registers hold untagged 32 bit values. - Register left = ToRegister(instr->left()); - Register right = ToRegister(instr->right()); - Register result = ToRegister(instr->result()); - Label done; + } else if (CpuFeatures::IsSupported(SUDIV)) { + CpuFeatureScope scope(masm(), SUDIV); - // Check for x % 0. - if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { - __ cmp(right, Operand::Zero()); - DeoptimizeIf(eq, instr->environment()); - } + Register left_reg = ToRegister(instr->left()); + Register right_reg = ToRegister(instr->right()); + Register result_reg = ToRegister(instr->result()); - if (CpuFeatures::IsSupported(SUDIV)) { - CpuFeatureScope scope(masm(), SUDIV); - // Check for (kMinInt % -1). - if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { - Label left_not_min_int; - __ cmp(left, Operand(kMinInt)); - __ b(ne, &left_not_min_int); - __ cmp(right, Operand(-1)); + Label done; + // Check for x % 0, sdiv might signal an exception. We have to deopt in this + // case because we can't return a NaN. + if (right->CanBeZero()) { + __ cmp(right_reg, Operand::Zero()); DeoptimizeIf(eq, instr->environment()); - __ bind(&left_not_min_int); } - // For r3 = r1 % r2; we can have the following ARM code - // sdiv r3, r1, r2 - // mls r3, r3, r2, r1 + // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we + // want. We have to deopt if we care about -0, because we can't return that. + if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) { + Label no_overflow_possible; + __ cmp(left_reg, Operand(kMinInt)); + __ b(ne, &no_overflow_possible); + __ cmp(right_reg, Operand(-1)); + if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { + DeoptimizeIf(eq, instr->environment()); + } else { + __ b(ne, &no_overflow_possible); + __ mov(result_reg, Operand::Zero()); + __ jmp(&done); + } + __ bind(&no_overflow_possible); + } - __ sdiv(result, left, right); - __ mls(result, result, right, left); + // For 'r3 = r1 % r2' we can have the following ARM code: + // sdiv r3, r1, r2 + // mls r3, r3, r2, r1 - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ cmp(result, Operand::Zero()); + __ sdiv(result_reg, left_reg, right_reg); + __ mls(result_reg, result_reg, right_reg, left_reg); + + // If we care about -0, test if the dividend is <0 and the result is 0. + if (left->CanBeNegative() && + hmod->CanBeZero() && + hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { + __ cmp(result_reg, Operand::Zero()); __ b(ne, &done); - __ cmp(left, Operand::Zero()); + __ cmp(left_reg, Operand::Zero()); DeoptimizeIf(lt, instr->environment()); } + __ bind(&done); + } else { + // General case, without any SDIV support. 
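// Here the remainder is recovered through VFP double arithmetic; in
// effect (the integer conversions round toward zero):
//
//   result = left - abs(right) * trunc(left / abs(right));
//
// e.g. -7 % 2: trunc(-3.5) = -3, so result = -7 - 2 * (-3) = -1.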
+ Register left_reg = ToRegister(instr->left()); + Register right_reg = ToRegister(instr->right()); + Register result_reg = ToRegister(instr->result()); Register scratch = scratch0(); - Register scratch2 = ToRegister(instr->temp()); - DwVfpRegister dividend = ToDoubleRegister(instr->temp2()); - DwVfpRegister divisor = ToDoubleRegister(instr->temp3()); + ASSERT(!scratch.is(left_reg)); + ASSERT(!scratch.is(right_reg)); + ASSERT(!scratch.is(result_reg)); + DwVfpRegister dividend = ToDoubleRegister(instr->temp()); + DwVfpRegister divisor = ToDoubleRegister(instr->temp2()); + ASSERT(!divisor.is(dividend)); DwVfpRegister quotient = double_scratch0(); + ASSERT(!quotient.is(dividend)); + ASSERT(!quotient.is(divisor)); - ASSERT(!dividend.is(divisor)); - ASSERT(!dividend.is(quotient)); - ASSERT(!divisor.is(quotient)); - ASSERT(!scratch.is(left)); - ASSERT(!scratch.is(right)); - ASSERT(!scratch.is(result)); - - Label vfp_modulo, right_negative; - - __ Move(result, left); - - // (0 % x) must yield 0 (if x is finite, which is the case here). - __ cmp(left, Operand::Zero()); - __ b(eq, &done); - // Preload right in a vfp register. - __ vmov(divisor.low(), right); - __ b(lt, &vfp_modulo); - - __ cmp(left, Operand(right)); - __ b(lt, &done); - - // Check for (positive) power of two on the right hand side. - __ JumpIfNotPowerOfTwoOrZeroAndNeg(right, - scratch, - &right_negative, - &vfp_modulo); - // Perform modulo operation (scratch contains right - 1). - __ and_(result, scratch, Operand(left)); - __ b(&done); - - __ bind(&right_negative); - // Negate right. The sign of the divisor does not matter. - __ rsb(right, right, Operand::Zero()); + Label done; + // Check for x % 0, we have to deopt in this case because we can't return a + // NaN. + if (right->CanBeZero()) { + __ cmp(right_reg, Operand::Zero()); + DeoptimizeIf(eq, instr->environment()); + } - __ bind(&vfp_modulo); - // Load the arguments in VFP registers. - // The divisor value is preloaded before. Be careful that 'right' - // is only live on entry. - __ vmov(dividend.low(), left); - // From here on don't use right as it may have been reallocated - // (for example to scratch2). - right = no_reg; + __ Move(result_reg, left_reg); + // Load the arguments in VFP registers. The divisor value is preloaded + // before. Be careful that 'right_reg' is only live on entry. + // TODO(svenpanne) The last comments seems to be wrong nowadays. + __ vmov(dividend.low(), left_reg); + __ vmov(divisor.low(), right_reg); __ vcvt_f64_s32(dividend, dividend.low()); __ vcvt_f64_s32(divisor, divisor.low()); - // We do not care about the sign of the divisor. + // We do not care about the sign of the divisor. Note that we still handle + // the kMinInt % -1 case correctly, though. __ vabs(divisor, divisor); // Compute the quotient and round it to a 32bit integer. __ vdiv(quotient, dividend, divisor); @@ -1281,22 +1320,18 @@ void LCodeGen::DoModI(LModI* instr) { __ vmul(double_scratch, divisor, quotient); __ vcvt_s32_f64(double_scratch.low(), double_scratch); __ vmov(scratch, double_scratch.low()); + __ sub(result_reg, left_reg, scratch, SetCC); - if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ sub(result, left, scratch); - } else { - Label ok; - // Check for -0. - __ sub(scratch2, left, scratch, SetCC); - __ b(ne, &ok); - __ cmp(left, Operand::Zero()); + // If we care about -0, test if the dividend is <0 and the result is 0. 
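// e.g. in JavaScript -4 % 2 is -0; the integer result 0 would lose the
// sign, hence the deopt when that distinction is observable: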
+ if (left->CanBeNegative() && + hmod->CanBeZero() && + hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { + __ b(ne, &done); + __ cmp(left_reg, Operand::Zero()); DeoptimizeIf(mi, instr->environment()); - __ bind(&ok); - // Load the result and we are done. - __ mov(result, scratch2); } + __ bind(&done); } - __ bind(&done); } @@ -1395,25 +1430,9 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant( void LCodeGen::DoDivI(LDivI* instr) { - class DeferredDivI: public LDeferredCode { - public: - DeferredDivI(LCodeGen* codegen, LDivI* instr) - : LDeferredCode(codegen), instr_(instr) { } - virtual void Generate() { - codegen()->DoDeferredBinaryOpStub(instr_->pointer_map(), - instr_->left(), - instr_->right(), - Token::DIV); - } - virtual LInstruction* instr() { return instr_; } - private: - LDivI* instr_; - }; - if (instr->hydrogen()->HasPowerOf2Divisor()) { Register dividend = ToRegister(instr->left()); - int32_t divisor = - HConstant::cast(instr->hydrogen()->right())->Integer32Value(); + int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant(); int32_t test_value = 0; int32_t power = 0; @@ -1436,10 +1455,19 @@ void LCodeGen::DoDivI(LDivI* instr) { } if (test_value != 0) { - // Deoptimize if remainder is not 0. - __ tst(dividend, Operand(test_value)); - DeoptimizeIf(ne, instr->environment()); - __ mov(dividend, Operand(dividend, ASR, power)); + if (instr->hydrogen()->CheckFlag( + HInstruction::kAllUsesTruncatingToInt32)) { + __ cmp(dividend, Operand(0)); + __ rsb(dividend, dividend, Operand(0), LeaveCC, lt); + __ mov(dividend, Operand(dividend, ASR, power)); + if (divisor > 0) __ rsb(dividend, dividend, Operand(0), LeaveCC, lt); + return; // Don't fall through to "__ rsb" below. + } else { + // Deoptimize if remainder is not 0. + __ tst(dividend, Operand(test_value)); + DeoptimizeIf(ne, instr->environment()); + __ mov(dividend, Operand(dividend, ASR, power)); + } } if (divisor < 0) __ rsb(dividend, dividend, Operand(0)); @@ -1476,40 +1504,38 @@ void LCodeGen::DoDivI(LDivI* instr) { __ bind(&left_not_min_int); } - Label done, deoptimize; - // Test for a few common cases first. - __ cmp(right, Operand(1)); - __ mov(result, left, LeaveCC, eq); - __ b(eq, &done); - - __ cmp(right, Operand(2)); - __ tst(left, Operand(1), eq); - __ mov(result, Operand(left, ASR, 1), LeaveCC, eq); - __ b(eq, &done); - - __ cmp(right, Operand(4)); - __ tst(left, Operand(3), eq); - __ mov(result, Operand(left, ASR, 2), LeaveCC, eq); - __ b(eq, &done); - - // Call the stub. The numbers in r0 and r1 have - // to be tagged to Smis. If that is not possible, deoptimize. - DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr); - - __ TrySmiTag(left, &deoptimize); - __ TrySmiTag(right, &deoptimize); - - __ b(al, deferred->entry()); - __ bind(deferred->exit()); - - // If the result in r0 is a Smi, untag it, else deoptimize. - __ JumpIfNotSmi(result, &deoptimize); - __ SmiUntag(result); - __ b(&done); + if (CpuFeatures::IsSupported(SUDIV)) { + CpuFeatureScope scope(masm(), SUDIV); + __ sdiv(result, left, right); - __ bind(&deoptimize); - DeoptimizeIf(al, instr->environment()); - __ bind(&done); + if (!instr->hydrogen()->CheckFlag( + HInstruction::kAllUsesTruncatingToInt32)) { + // Compute remainder and deopt if it's not zero. 
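// mls folds the multiply-subtract into one instruction, so the check is:
//
//   remainder = left - result * right;   // result = sdiv(left, right)
//   if (remainder != 0) deoptimize();    // division was not exact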
+ const Register remainder = scratch0(); + __ mls(remainder, result, right, left); + __ cmp(remainder, Operand::Zero()); + DeoptimizeIf(ne, instr->environment()); + } + } else { + const DoubleRegister vleft = ToDoubleRegister(instr->temp()); + const DoubleRegister vright = double_scratch0(); + __ vmov(vleft.low(), left); + __ vmov(vright.low(), right); + __ vcvt_f64_s32(vleft, vleft.low()); + __ vcvt_f64_s32(vright, vright.low()); + __ vdiv(vleft, vleft, vright); // vleft now contains the result. + __ vcvt_s32_f64(vright.low(), vleft); + __ vmov(result, vright.low()); + + if (!instr->hydrogen()->CheckFlag( + HInstruction::kAllUsesTruncatingToInt32)) { + // Deopt if exact conversion to integer was not possible. + // Use vright as scratch register. + __ vcvt_f64_s32(vright, vright.low()); + __ VFPCompareAndSetFlags(vleft, vright); + DeoptimizeIf(ne, instr->environment()); + } + } } @@ -1608,38 +1634,6 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) { } -void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map, - LOperand* left_argument, - LOperand* right_argument, - Token::Value op) { - Register left = ToRegister(left_argument); - Register right = ToRegister(right_argument); - - PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles); - // Move left to r1 and right to r0 for the stub call. - if (left.is(r1)) { - __ Move(r0, right); - } else if (left.is(r0) && right.is(r1)) { - __ Swap(r0, r1, r2); - } else if (left.is(r0)) { - ASSERT(!right.is(r1)); - __ mov(r1, r0); - __ mov(r0, right); - } else { - ASSERT(!left.is(r0) && !right.is(r0)); - __ mov(r0, right); - __ mov(r1, left); - } - BinaryOpStub stub(op, OVERWRITE_LEFT); - __ CallStub(&stub); - RecordSafepointWithRegistersAndDoubles(pointer_map, - 0, - Safepoint::kNoLazyDeopt); - // Overwrite the stored value of r0 with the result of the stub. 
- __ StoreToSafepointRegistersAndDoublesSlot(r0, r0); -} - - void LCodeGen::DoMulI(LMulI* instr) { Register scratch = scratch0(); Register result = ToRegister(instr->result()); @@ -1889,7 +1883,11 @@ void LCodeGen::DoRSubI(LRSubI* instr) { void LCodeGen::DoConstantI(LConstantI* instr) { - ASSERT(instr->result()->IsRegister()); __ mov(ToRegister(instr->result()), Operand(instr->value())); +} + + +void LCodeGen::DoConstantS(LConstantS* instr) { + __ mov(ToRegister(instr->result()), Operand(instr->value())); } @@ -1904,7 +1902,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) { void LCodeGen::DoConstantT(LConstantT* instr) { Handle<Object> value = instr->value(); - ALLOW_HANDLE_DEREF(isolate(), "smi check"); + AllowDeferredHandleDereference smi_check; if (value->IsSmi()) { __ mov(ToRegister(instr->result()), Operand(value)); } else { @@ -2003,11 +2001,34 @@ void LCodeGen::DoDateField(LDateField* instr) { void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { - SeqStringSetCharGenerator::Generate(masm(), - instr->encoding(), - ToRegister(instr->string()), - ToRegister(instr->index()), - ToRegister(instr->value())); + Register string = ToRegister(instr->string()); + Register index = ToRegister(instr->index()); + Register value = ToRegister(instr->value()); + String::Encoding encoding = instr->encoding(); + + if (FLAG_debug_code) { + __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset)); + __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); + + __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask)); + static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; + static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; + __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING + ? one_byte_seq_type : two_byte_seq_type)); + __ Check(eq, "Unexpected string type"); + } + + __ add(ip, + string, + Operand(SeqString::kHeaderSize - kHeapObjectTag)); + if (encoding == String::ONE_BYTE_ENCODING) { + __ strb(value, MemOperand(ip, index)); + } else { + // MemOperand with ip as the base register is not allowed for strh, so + // we do the address calculation explicitly. + __ add(ip, ip, Operand(index, LSL, 1)); + __ strh(value, MemOperand(ip)); + } } @@ -2207,11 +2228,13 @@ void LCodeGen::DoBranch(LBranch* instr) { int false_block = chunk_->LookupDestination(instr->false_block_id()); Representation r = instr->hydrogen()->value()->representation(); - if (r.IsInteger32()) { + if (r.IsInteger32() || r.IsSmi()) { + ASSERT(!info()->IsStub()); Register reg = ToRegister(instr->value()); __ cmp(reg, Operand::Zero()); EmitBranch(true_block, false_block, ne); } else if (r.IsDouble()) { + ASSERT(!info()->IsStub()); DwVfpRegister reg = ToDoubleRegister(instr->value()); // Test the double value. Zero and NaN are false.
__ VFPCompareAndSetFlags(reg, 0.0); @@ -2222,9 +2245,11 @@ void LCodeGen::DoBranch(LBranch* instr) { Register reg = ToRegister(instr->value()); HType type = instr->hydrogen()->value()->type(); if (type.IsBoolean()) { + ASSERT(!info()->IsStub()); __ CompareRoot(reg, Heap::kTrueValueRootIndex); EmitBranch(true_block, false_block, eq); } else if (type.IsSmi()) { + ASSERT(!info()->IsStub()); __ cmp(reg, Operand::Zero()); EmitBranch(true_block, false_block, ne); } else { @@ -2386,11 +2411,19 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { __ b(vs, chunk_->GetAssemblyLabel(false_block)); } else { if (right->IsConstantOperand()) { - __ cmp(ToRegister(left), - Operand(ToInteger32(LConstantOperand::cast(right)))); + int32_t value = ToInteger32(LConstantOperand::cast(right)); + if (instr->hydrogen_value()->representation().IsSmi()) { + __ cmp(ToRegister(left), Operand(Smi::FromInt(value))); + } else { + __ cmp(ToRegister(left), Operand(value)); + } } else if (left->IsConstantOperand()) { - __ cmp(ToRegister(right), - Operand(ToInteger32(LConstantOperand::cast(left)))); + int32_t value = ToInteger32(LConstantOperand::cast(left)); + if (instr->hydrogen_value()->representation().IsSmi()) { + __ cmp(ToRegister(right), Operand(Smi::FromInt(value))); + } else { + __ cmp(ToRegister(right), Operand(value)); + } // We transposed the operands. Reverse the condition. cond = ReverseCondition(cond); } else { @@ -2905,8 +2938,8 @@ void LCodeGen::DoReturn(LReturn* instr) { int no_frame_start = -1; if (NeedsEagerFrame()) { __ mov(sp, fp); - __ ldm(ia_w, sp, fp.bit() | lr.bit()); no_frame_start = masm_->pc_offset(); + __ ldm(ia_w, sp, fp.bit() | lr.bit()); } if (instr->has_constant_parameter_count()) { int parameter_count = ToInteger32(instr->constant_parameter_count()); @@ -3045,7 +3078,8 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { - int offset = instr->hydrogen()->offset(); + HObjectAccess access = instr->hydrogen()->access(); + int offset = access.offset(); Register object = ToRegister(instr->object()); if (instr->hydrogen()->representation().IsDouble()) { DwVfpRegister result = ToDoubleRegister(instr->result()); @@ -3054,7 +3088,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { } Register result = ToRegister(instr->result()); - if (instr->hydrogen()->is_in_object()) { + if (access.IsInobject()) { __ ldr(result, FieldMemOperand(object, offset)); } else { __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); @@ -3123,8 +3157,7 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) { bool last = (i == map_count - 1); Handle<Map> map = instr->hydrogen()->types()->at(i); Label check_passed; - __ CompareMap( object_map, map, &check_passed, ALLOW_ELEMENT_TRANSITION_MAPS); + __ CompareMap(object_map, map, &check_passed); if (last && !need_generic) { DeoptimizeIf(ne, instr->environment()); __ bind(&check_passed); @@ -3249,7 +3282,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { key = ToRegister(instr->key()); } int element_size_shift = ElementsKindToShiftSize(elements_kind); - int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) + int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) ?
(element_size_shift - kSmiTagSize) : element_size_shift; int additional_offset = instr->additional_index() << element_size_shift; @@ -3321,7 +3354,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { Register scratch = scratch0(); int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) + int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) ? (element_size_shift - kSmiTagSize) : element_size_shift; int constant_key = 0; if (key_is_constant) { @@ -3366,7 +3399,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { // representation for the key to be an integer, the input gets replaced // during bound check elimination with the index argument to the bounds // check, which can be tagged, so that case must be handled here, too. - if (instr->hydrogen()->key()->representation().IsTagged()) { + if (instr->hydrogen()->key()->representation().IsSmi()) { __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key)); } else { __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); @@ -3924,7 +3957,10 @@ void LCodeGen::DoPower(LPower* instr) { ASSERT(ToDoubleRegister(instr->left()).is(d1)); ASSERT(ToDoubleRegister(instr->result()).is(d3)); - if (exponent_type.IsTagged()) { + if (exponent_type.IsSmi()) { + MathPowStub stub(MathPowStub::TAGGED); + __ CallStub(&stub); + } else if (exponent_type.IsTagged()) { Label no_deopt; __ JumpIfSmi(r2, &no_deopt); __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset)); @@ -4176,14 +4212,17 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) { __ mov(r0, Operand(instr->arity())); __ mov(r2, Operand(instr->hydrogen()->property_cell())); ElementsKind kind = instr->hydrogen()->elements_kind(); + bool disable_allocation_sites = + (AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE); + if (instr->arity() == 0) { - ArrayNoArgumentConstructorStub stub(kind); + ArrayNoArgumentConstructorStub stub(kind, disable_allocation_sites); CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); } else if (instr->arity() == 1) { - ArraySingleArgumentConstructorStub stub(kind); + ArraySingleArgumentConstructorStub stub(kind, disable_allocation_sites); CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); } else { - ArrayNArgumentsConstructorStub stub(kind); + ArrayNArgumentsConstructorStub stub(kind, disable_allocation_sites); CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); } } @@ -4206,17 +4245,13 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { Register object = ToRegister(instr->object()); Register scratch = scratch0(); - int offset = instr->offset(); + + HObjectAccess access = instr->hydrogen()->access(); + int offset = access.offset(); Handle<Map> transition = instr->transition(); - if (FLAG_track_fields && representation.IsSmi()) { - Register value = ToRegister(instr->value()); - __ SmiTag(value, value, SetCC); - if (!instr->hydrogen()->value()->range()->IsInSmiRange()) { - DeoptimizeIf(vs, instr->environment()); - } - } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { + if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { Register value = ToRegister(instr->value()); if (!instr->hydrogen()->value()->type().IsHeapObject()) { __ SmiTst(value); @@ -4224,7 +4259,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { } } else if (FLAG_track_double_fields && representation.IsDouble()) { ASSERT(transition.is_null()); -
ASSERT(instr->is_in_object()); + ASSERT(access.IsInobject()); ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); DwVfpRegister value = ToDoubleRegister(instr->value()); __ vstr(value, FieldMemOperand(object, offset)); @@ -4257,7 +4292,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { HType type = instr->hydrogen()->value()->type(); SmiCheck check_needed = type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - if (instr->is_in_object()) { + if (access.IsInobject()) { __ str(value, FieldMemOperand(object, offset)); if (instr->hydrogen()->NeedsWriteBarrier()) { // Update the write barrier for the object for in-object properties. @@ -4308,7 +4343,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { if (instr->index()->IsConstantOperand()) { int constant_index = ToInteger32(LConstantOperand::cast(instr->index())); - if (instr->hydrogen()->length()->representation().IsTagged()) { + if (instr->hydrogen()->length()->representation().IsSmi()) { __ mov(ip, Operand(Smi::FromInt(constant_index))); } else { __ mov(ip, Operand(constant_index)); @@ -4336,7 +4371,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { key = ToRegister(instr->key()); } int element_size_shift = ElementsKindToShiftSize(elements_kind); - int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) + int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) ? (element_size_shift - kSmiTagSize) : element_size_shift; int additional_offset = instr->additional_index() << element_size_shift; @@ -4409,7 +4444,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { key = ToRegister(instr->key()); } int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) + int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) ? (element_size_shift - kSmiTagSize) : element_size_shift; Operand operand = key_is_constant ? Operand((constant_key << element_size_shift) + @@ -4455,7 +4490,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { // representation for the key to be an integer, the input gets replaced // during bound check elimination with the index argument to the bounds // check, which can be tagged, so that case must be handled here, too. 
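// PointerOffsetFromSmiKey relies on that encoding: a smi key already
// holds index * 2, so one more doubling yields the byte offset; roughly
//
//   scratch = elements + (key >> kSmiTagSize) * kPointerSize;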
- if (instr->hydrogen()->key()->representation().IsTagged()) { + if (instr->hydrogen()->key()->representation().IsSmi()) { __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key)); } else { __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); @@ -4702,6 +4737,19 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { } +void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) { + LOperand* input = instr->value(); + ASSERT(input->IsRegister()); + LOperand* output = instr->result(); + ASSERT(output->IsRegister()); + __ SmiTag(ToRegister(output), ToRegister(input), SetCC); + if (!instr->hydrogen()->value()->HasRange() || + !instr->hydrogen()->value()->range()->IsInSmiRange()) { + DeoptimizeIf(vs, instr->environment()); + } +} + + void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { LOperand* input = instr->value(); LOperand* output = instr->result(); @@ -4913,7 +4961,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) { void LCodeGen::EmitNumberUntagD(Register input_reg, DwVfpRegister result_reg, - bool deoptimize_on_undefined, + bool allow_undefined_as_nan, bool deoptimize_on_minus_zero, LEnvironment* env, NumberUntagDMode mode) { @@ -4923,7 +4971,9 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, Label load_smi, heap_number, done; - if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { + STATIC_ASSERT(NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE > + NUMBER_CANDIDATE_IS_ANY_TAGGED); + if (mode >= NUMBER_CANDIDATE_IS_ANY_TAGGED) { // Smi check. __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); @@ -4931,17 +4981,23 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); __ cmp(scratch, Operand(ip)); - if (deoptimize_on_undefined) { + if (!allow_undefined_as_nan) { DeoptimizeIf(ne, env); } else { - Label heap_number; + Label heap_number, convert; __ b(eq, &heap_number); + // Convert undefined (and hole) to NaN. __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); __ cmp(input_reg, Operand(ip)); + if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE) { + __ b(eq, &convert); + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(input_reg, Operand(ip)); + } DeoptimizeIf(ne, env); - // Convert undefined to NaN. 
+ __ bind(&convert); __ LoadRoot(ip, Heap::kNanValueRootIndex); __ sub(ip, ip, Operand(kHeapObjectTag)); __ vldr(result_reg, ip, HeapNumber::kValueOffset); @@ -4961,15 +5017,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, DeoptimizeIf(eq, env); } __ jmp(&done); - } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) { - __ SmiUntag(scratch, input_reg, SetCC); - DeoptimizeIf(cs, env); - } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) { - __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); - __ Vmov(result_reg, - FixedDoubleArray::hole_nan_as_double(), - no_reg); - __ b(&done); } else { __ SmiUntag(scratch, input_reg); ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); @@ -5093,24 +5140,18 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED; HValue* value = instr->hydrogen()->value(); if (value->type().IsSmi()) { - if (value->IsLoadKeyed()) { - HLoadKeyed* load = HLoadKeyed::cast(value); - if (load->UsesMustHandleHole()) { - if (load->hole_mode() == ALLOW_RETURN_HOLE) { - mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE; - } else { - mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE; - } - } else { - mode = NUMBER_CANDIDATE_IS_SMI; + mode = NUMBER_CANDIDATE_IS_SMI; + } else if (value->IsLoadKeyed()) { + HLoadKeyed* load = HLoadKeyed::cast(value); + if (load->UsesMustHandleHole()) { + if (load->hole_mode() == ALLOW_RETURN_HOLE) { + mode = NUMBER_CANDIDATE_IS_ANY_TAGGED_CONVERT_HOLE; } - } else { - mode = NUMBER_CANDIDATE_IS_SMI; } } EmitNumberUntagD(input_reg, result_reg, - instr->hydrogen()->deoptimize_on_undefined(), + instr->hydrogen()->allow_undefined_as_nan(), instr->hydrogen()->deoptimize_on_minus_zero(), instr->environment(), mode); @@ -5124,7 +5165,33 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { DwVfpRegister double_input = ToDoubleRegister(instr->value()); DwVfpRegister double_scratch = double_scratch0(); - Label done; + if (instr->truncating()) { + Register scratch3 = ToRegister(instr->temp2()); + __ ECMAToInt32(result_reg, double_input, + scratch1, scratch2, scratch3, double_scratch); + } else { + __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); + // Deoptimize if the input wasn't a int32 (inside a double). + DeoptimizeIf(ne, instr->environment()); + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label done; + __ cmp(result_reg, Operand::Zero()); + __ b(ne, &done); + __ vmov(scratch1, double_input.high()); + __ tst(scratch1, Operand(HeapNumber::kSignMask)); + DeoptimizeIf(ne, instr->environment()); + __ bind(&done); + } + } +} + + +void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { + Register result_reg = ToRegister(instr->result()); + Register scratch1 = scratch0(); + Register scratch2 = ToRegister(instr->temp()); + DwVfpRegister double_input = ToDoubleRegister(instr->value()); + DwVfpRegister double_scratch = double_scratch0(); if (instr->truncating()) { Register scratch3 = ToRegister(instr->temp2()); @@ -5134,8 +5201,18 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); // Deoptimize if the input wasn't a int32 (inside a double). 
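// TryDoubleToInt32Exact truncates and converts back for comparison,
// leaving eq only on a lossless round trip; roughly:
//
//   int32_t i = trunc(d);  if ((double)i != d) deoptimize();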
DeoptimizeIf(ne, instr->environment()); + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + Label done; + __ cmp(result_reg, Operand::Zero()); + __ b(ne, &done); + __ vmov(scratch1, double_input.high()); + __ tst(scratch1, Operand(HeapNumber::kSignMask)); + DeoptimizeIf(ne, instr->environment()); + __ bind(&done); + } } - __ bind(&done); + __ SmiTag(result_reg, SetCC); + DeoptimizeIf(vs, instr->environment()); } @@ -5199,7 +5276,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { void LCodeGen::DoCheckFunction(LCheckFunction* instr) { Register reg = ToRegister(instr->value()); Handle<JSFunction> target = instr->hydrogen()->target(); - ALLOW_HANDLE_DEREF(isolate(), "smi check"); + AllowDeferredHandleDereference smi_check; if (isolate()->heap()->InNewSpace(*target)) { Register reg = ToRegister(instr->value()); Handle<JSGlobalPropertyCell> cell = @@ -5216,10 +5293,9 @@ void LCodeGen::DoCheckMapCommon(Register map_reg, Handle<Map> map, - CompareMapMode mode, LEnvironment* env) { Label success; - __ CompareMap(map_reg, map, &success, mode); + __ CompareMap(map_reg, map, &success); DeoptimizeIf(ne, env); __ bind(&success); } @@ -5236,11 +5312,11 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); for (int i = 0; i < map_set->length() - 1; i++) { Handle<Map> map = map_set->at(i); - __ CompareMap(map_reg, map, &success, REQUIRE_EXACT_MAP); + __ CompareMap(map_reg, map, &success); __ b(eq, &success); } Handle<Map> map = map_set->last(); - DoCheckMapCommon(map_reg, map, REQUIRE_EXACT_MAP, instr->environment()); + DoCheckMapCommon(map_reg, map, instr->environment()); __ bind(&success); } @@ -5314,89 +5390,12 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { for (int i = 0; i < prototypes->length(); i++) { __ LoadHeapObject(prototype_reg, prototypes->at(i)); __ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset)); - DoCheckMapCommon(map_reg, - maps->at(i), - ALLOW_ELEMENT_TRANSITION_MAPS, - instr->environment()); + DoCheckMapCommon(map_reg, maps->at(i), instr->environment()); } } } -void LCodeGen::DoAllocateObject(LAllocateObject* instr) { - class DeferredAllocateObject: public LDeferredCode { - public: - DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr) - : LDeferredCode(codegen), instr_(instr) { } - virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); } - virtual LInstruction* instr() { return instr_; } - private: - LAllocateObject* instr_; - }; - - DeferredAllocateObject* deferred = - new(zone()) DeferredAllocateObject(this, instr); - - Register result = ToRegister(instr->result()); - Register scratch = ToRegister(instr->temp()); - Register scratch2 = ToRegister(instr->temp2()); - Handle<JSFunction> constructor = instr->hydrogen()->constructor(); - Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map(); - int instance_size = initial_map->instance_size(); - ASSERT(initial_map->pre_allocated_property_fields() + - initial_map->unused_property_fields() - - initial_map->inobject_properties() == 0); - - __ Allocate(instance_size, result, scratch, scratch2, deferred->entry(), - TAG_OBJECT); - - __ bind(deferred->exit()); - if (FLAG_debug_code) { - Label is_in_new_space; - __ JumpIfInNewSpace(result, scratch, &is_in_new_space); - __ Abort("Allocated object is not in new-space"); - __ bind(&is_in_new_space); - } - - // Load the initial map.
- Register map = scratch; - __ LoadHeapObject(map, constructor); - __ ldr(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset)); - - // Initialize map and fields of the newly allocated object. - ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE); - __ str(map, FieldMemOperand(result, JSObject::kMapOffset)); - __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex); - __ str(scratch, FieldMemOperand(result, JSObject::kElementsOffset)); - __ str(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset)); - if (initial_map->inobject_properties() != 0) { - __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); - for (int i = 0; i < initial_map->inobject_properties(); i++) { - int property_offset = JSObject::kHeaderSize + i * kPointerSize; - __ str(scratch, FieldMemOperand(result, property_offset)); - } - } -} - - -void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) { - Register result = ToRegister(instr->result()); - Handle initial_map = instr->hydrogen()->constructor_initial_map(); - int instance_size = initial_map->instance_size(); - - // TODO(3095996): Get rid of this. For now, we need to make the - // result register contain a valid pointer because it is already - // contained in the register pointer map. - __ mov(result, Operand::Zero()); - - PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); - __ mov(r0, Operand(Smi::FromInt(instance_size))); - __ push(r0); - CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr); - __ StoreToSafepointRegisterSlot(r0, result); -} - - void LCodeGen::DoAllocate(LAllocate* instr) { class DeferredAllocate: public LDeferredCode { public: @@ -5421,8 +5420,12 @@ void LCodeGen::DoAllocate(LAllocate* instr) { flags = static_cast(flags | DOUBLE_ALIGNMENT); } if (instr->hydrogen()->CanAllocateInOldPointerSpace()) { + ASSERT(!instr->hydrogen()->CanAllocateInOldDataSpace()); flags = static_cast(flags | PRETENURE_OLD_POINTER_SPACE); + } else if (instr->hydrogen()->CanAllocateInOldDataSpace()) { + flags = static_cast(flags | PRETENURE_OLD_DATA_SPACE); } + if (instr->size()->IsConstantOperand()) { int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); @@ -5460,11 +5463,12 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) { } if (instr->hydrogen()->CanAllocateInOldPointerSpace()) { - CallRuntimeFromDeferred( - Runtime::kAllocateInOldPointerSpace, 1, instr); + ASSERT(!instr->hydrogen()->CanAllocateInOldDataSpace()); + CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr); + } else if (instr->hydrogen()->CanAllocateInOldDataSpace()) { + CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr); } else { - CallRuntimeFromDeferred( - Runtime::kAllocateInNewSpace, 1, instr); + CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr); } __ StoreToSafepointRegisterSlot(r0, result); } diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index 1a34169..f264259 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -119,6 +119,7 @@ class LCodeGen BASE_EMBEDDED { SwVfpRegister flt_scratch, DwVfpRegister dbl_scratch); int ToInteger32(LConstantOperand* op) const; + Smi* ToSmi(LConstantOperand* op) const; double ToDouble(LConstantOperand* op) const; Operand ToOperand(LOperand* op); MemOperand ToMemOperand(LOperand* op) const; @@ -126,6 +127,7 @@ class LCodeGen BASE_EMBEDDED { MemOperand ToHighMemOperand(LOperand* op) const; 
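The DoAllocate and DoDeferredAllocate hunks above route pretenured allocations to either the old pointer space or the old data space, never both; the new ASSERTs encode that exclusivity. A compact restatement of the flag selection, with illustrative bit values (V8's real AllocationFlags enum has its own encoding):

#include <cassert>

enum AllocationFlags {
  TAG_OBJECT = 1 << 0,
  DOUBLE_ALIGNMENT = 1 << 1,
  PRETENURE_OLD_POINTER_SPACE = 1 << 2,
  PRETENURE_OLD_DATA_SPACE = 1 << 3
};

AllocationFlags ComputeAllocationFlags(bool needs_double_align,
                                       bool in_old_pointer_space,
                                       bool in_old_data_space) {
  int flags = TAG_OBJECT;
  if (needs_double_align) flags |= DOUBLE_ALIGNMENT;
  if (in_old_pointer_space) {
    assert(!in_old_data_space);  // mirrors the ASSERT added above
    flags |= PRETENURE_OLD_POINTER_SPACE;
  } else if (in_old_data_space) {
    flags |= PRETENURE_OLD_DATA_SPACE;
  }
  return static_cast<AllocationFlags>(flags);
}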
bool IsInteger32(LConstantOperand* op) const; + bool IsSmi(LConstantOperand* op) const; Handle ToHandle(LConstantOperand* op) const; // Try to generate code for the entire chunk, but it may fail if the @@ -138,10 +140,6 @@ class LCodeGen BASE_EMBEDDED { void FinishCode(Handle code); // Deferred code support. - void DoDeferredBinaryOpStub(LPointerMap* pointer_map, - LOperand* left_argument, - LOperand* right_argument, - Token::Value op); void DoDeferredNumberTagD(LNumberTagD* instr); enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 }; @@ -155,13 +153,11 @@ class LCodeGen BASE_EMBEDDED { void DoDeferredRandom(LRandom* instr); void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); void DoDeferredStringCharFromCode(LStringCharFromCode* instr); - void DoDeferredAllocateObject(LAllocateObject* instr); void DoDeferredAllocate(LAllocate* instr); void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, Label* map_check); - void DoCheckMapCommon(Register map_reg, Handle map, - CompareMapMode mode, LEnvironment* env); + void DoCheckMapCommon(Register map_reg, Handle map, LEnvironment* env); // Parallel move support. void DoParallelMove(LParallelMove* move); @@ -334,7 +330,7 @@ class LCodeGen BASE_EMBEDDED { void EmitBranch(int left_block, int right_block, Condition cc); void EmitNumberUntagD(Register input, DwVfpRegister result, - bool deoptimize_on_undefined, + bool allow_undefined_as_nan, bool deoptimize_on_minus_zero, LEnvironment* env, NumberUntagDMode mode); diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc index 596d58f..352fbb9 100644 --- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc +++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc @@ -248,7 +248,9 @@ void LGapResolver::EmitMove(int index) { LConstantOperand* constant_source = LConstantOperand::cast(source); if (destination->IsRegister()) { Register dst = cgen_->ToRegister(destination); - if (cgen_->IsInteger32(constant_source)) { + if (cgen_->IsSmi(constant_source)) { + __ mov(dst, Operand(cgen_->ToSmi(constant_source))); + } else if (cgen_->IsInteger32(constant_source)) { __ mov(dst, Operand(cgen_->ToInteger32(constant_source))); } else { __ LoadObject(dst, cgen_->ToHandle(constant_source)); @@ -256,7 +258,9 @@ void LGapResolver::EmitMove(int index) { } else { ASSERT(destination->IsStackSlot()); ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone. 
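The new IsSmi/ToSmi accessors let the constant-move code above (and its stack-slot twin just below) materialize a Smi constant as a single immediate move, because a tagged Smi is just the integer shifted into the payload bits. A two-line sketch of the assumed 32-bit encoding (the 64-bit layout shifts by 32 instead):

#include <cstdint>

// Assumed 32-bit Smi layout: a 31-bit payload above a clear tag bit 0.
constexpr int32_t SmiFromInt(int32_t value) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}

static_assert(SmiFromInt(21) == 42, "a tagged Smi is the value doubled");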
- if (cgen_->IsInteger32(constant_source)) { + if (cgen_->IsSmi(constant_source)) { + __ mov(kSavedValueRegister, Operand(cgen_->ToSmi(constant_source))); + } else if (cgen_->IsInteger32(constant_source)) { __ mov(kSavedValueRegister, Operand(cgen_->ToInteger32(constant_source))); } else { diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index a3b21a2..f3cfdc7 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -74,7 +74,7 @@ void MacroAssembler::Jump(Handle code, RelocInfo::Mode rmode, Condition cond) { ASSERT(RelocInfo::IsCodeTarget(rmode)); // 'code' is always generated ARM code, never THUMB code - ALLOW_HANDLE_DEREF(isolate(), "embedding raw address"); + AllowDeferredHandleDereference embedding_raw_address; Jump(reinterpret_cast(code.location()), rmode, cond); } @@ -163,7 +163,7 @@ int MacroAssembler::CallSize(Handle code, RelocInfo::Mode rmode, TypeFeedbackId ast_id, Condition cond) { - ALLOW_HANDLE_DEREF(isolate(), "using raw address"); + AllowDeferredHandleDereference using_raw_address; return CallSize(reinterpret_cast
<Address>(code.location()), rmode, cond); } @@ -181,7 +181,7 @@ void MacroAssembler::Call(Handle<Code> code, rmode = RelocInfo::CODE_TARGET_WITH_ID; } // 'code' is always generated ARM code, never THUMB code - ALLOW_HANDLE_DEREF(isolate(), "embedding raw address"); + AllowDeferredHandleDereference embedding_raw_address; Call(reinterpret_cast<Address>
(code.location()), rmode, cond, mode); } @@ -398,7 +398,7 @@ void MacroAssembler::StoreRoot(Register source, void MacroAssembler::LoadHeapObject(Register result, Handle object) { - ALLOW_HANDLE_DEREF(isolate(), "using raw address"); + AllowDeferredHandleDereference using_raw_address; if (isolate()->heap()->InNewSpace(*object)) { Handle cell = isolate()->factory()->NewJSGlobalPropertyCell(object); @@ -2105,32 +2105,16 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, void MacroAssembler::CompareMap(Register obj, Register scratch, Handle map, - Label* early_success, - CompareMapMode mode) { + Label* early_success) { ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); - CompareMap(scratch, map, early_success, mode); + CompareMap(scratch, map, early_success); } void MacroAssembler::CompareMap(Register obj_map, Handle map, - Label* early_success, - CompareMapMode mode) { + Label* early_success) { cmp(obj_map, Operand(map)); - if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) { - ElementsKind kind = map->elements_kind(); - if (IsFastElementsKind(kind)) { - bool packed = IsFastPackedElementsKind(kind); - Map* current_map = *map; - while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) { - kind = GetNextMoreGeneralFastElementsKind(kind, packed); - current_map = current_map->LookupElementsTransitionMap(kind); - if (!current_map) break; - b(eq, early_success); - cmp(obj_map, Operand(Handle(current_map))); - } - } - } } @@ -2138,14 +2122,13 @@ void MacroAssembler::CheckMap(Register obj, Register scratch, Handle map, Label* fail, - SmiCheckType smi_check_type, - CompareMapMode mode) { + SmiCheckType smi_check_type) { if (smi_check_type == DO_SMI_CHECK) { JumpIfSmi(obj, fail); } Label success; - CompareMap(obj, scratch, map, &success, mode); + CompareMap(obj, scratch, map, &success); b(ne, fail); bind(&success); } diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 50f53b3..11d3066 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -162,7 +162,7 @@ class MacroAssembler: public Assembler { void LoadHeapObject(Register dst, Handle object); void LoadObject(Register result, Handle object) { - ALLOW_HANDLE_DEREF(isolate(), "heap object check"); + AllowDeferredHandleDereference heap_object_check; if (object->IsHeapObject()) { LoadHeapObject(result, Handle::cast(object)); } else { @@ -884,15 +884,13 @@ class MacroAssembler: public Assembler { void CompareMap(Register obj, Register scratch, Handle map, - Label* early_success, - CompareMapMode mode = REQUIRE_EXACT_MAP); + Label* early_success); // As above, but the map of the object is already loaded into the register // which is preserved by the code generated. void CompareMap(Register obj_map, Handle map, - Label* early_success, - CompareMapMode mode = REQUIRE_EXACT_MAP); + Label* early_success); // Check if the map of an object is equal to a specified map and branch to // label if not. 
Skip the smi check if not required (object is known to be a @@ -902,8 +900,7 @@ class MacroAssembler: public Assembler { Register scratch, Handle map, Label* fail, - SmiCheckType smi_check_type, - CompareMapMode mode = REQUIRE_EXACT_MAP); + SmiCheckType smi_check_type); void CheckMap(Register obj, diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index da7afee..f05cba5 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -122,7 +122,7 @@ RegExpMacroAssemblerARM::RegExpMacroAssemblerARM( int registers_to_save, Zone* zone) : NativeRegExpMacroAssembler(zone), - masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)), + masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)), mode_(mode), num_registers_(registers_to_save), num_saved_registers_(registers_to_save), @@ -235,54 +235,6 @@ void RegExpMacroAssemblerARM::CheckCharacterLT(uc16 limit, Label* on_less) { } -void RegExpMacroAssemblerARM::CheckCharacters(Vector str, - int cp_offset, - Label* on_failure, - bool check_end_of_string) { - if (on_failure == NULL) { - // Instead of inlining a backtrack for each test, (re)use the global - // backtrack target. - on_failure = &backtrack_label_; - } - - if (check_end_of_string) { - // Is last character of required match inside string. - CheckPosition(cp_offset + str.length() - 1, on_failure); - } - - __ add(r0, end_of_input_address(), Operand(current_input_offset())); - if (cp_offset != 0) { - int byte_offset = cp_offset * char_size(); - __ add(r0, r0, Operand(byte_offset)); - } - - // r0 : Address of characters to match against str. - int stored_high_byte = 0; - for (int i = 0; i < str.length(); i++) { - if (mode_ == ASCII) { - __ ldrb(r1, MemOperand(r0, char_size(), PostIndex)); - ASSERT(str[i] <= String::kMaxOneByteCharCode); - __ cmp(r1, Operand(str[i])); - } else { - __ ldrh(r1, MemOperand(r0, char_size(), PostIndex)); - uc16 match_char = str[i]; - int match_high_byte = (match_char >> 8); - if (match_high_byte == 0) { - __ cmp(r1, Operand(str[i])); - } else { - if (match_high_byte != stored_high_byte) { - __ mov(r2, Operand(match_high_byte)); - stored_high_byte = match_high_byte; - } - __ add(r3, r2, Operand(match_char & 0xff)); - __ cmp(r1, r3); - } - } - BranchOrBacktrack(ne, on_failure); - } -} - - void RegExpMacroAssemblerARM::CheckGreedyLoop(Label* on_equal) { __ ldr(r0, MemOperand(backtrack_stackpointer(), 0)); __ cmp(current_input_offset(), r0); @@ -556,7 +508,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type, case 'd': // Match ASCII digits ('0'..'9') __ sub(r0, current_character(), Operand('0')); - __ cmp(current_character(), Operand('9' - '0')); + __ cmp(r0, Operand('9' - '0')); BranchOrBacktrack(hi, on_no_match); return true; case 'D': @@ -917,9 +869,8 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { CodeDesc code_desc; masm_->GetCode(&code_desc); - Handle code = FACTORY->NewCode(code_desc, - Code::ComputeFlags(Code::REGEXP), - masm_->CodeObject()); + Handle code = isolate()->factory()->NewCode( + code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject()); PROFILE(Isolate::Current(), RegExpCodeCreateEvent(*code, *source)); return Handle::cast(code); } diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h index 921d8f5..1825752 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.h +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h @@ -53,10 +53,6 @@ 
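The one-character '\d' fix above is easy to miss: the class test must compare the biased value already computed into r0 (the character minus '0'), not the raw character, so that a single unsigned comparison covers the whole digit range. The same check in plain C++:

#include <cstdint>

bool IsAsciiDigit(uint16_t c) {
  // Unsigned wrap-around sends characters below '0' to large values, so
  // one "biased <= 9" test replaces a pair of range checks.
  return static_cast<uint16_t>(c - '0') <= static_cast<uint16_t>('9' - '0');
}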
class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { Label* on_equal); virtual void CheckCharacterGT(uc16 limit, Label* on_greater); virtual void CheckCharacterLT(uc16 limit, Label* on_less); - virtual void CheckCharacters(Vector str, - int cp_offset, - Label* on_failure, - bool check_end_of_string); // A "greedy loop" is a loop that is both greedy and with a simple // body. It has a particularly simple implementation. virtual void CheckGreedyLoop(Label* on_tos_equals_current_position); diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index b0de014..3595b52 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -462,7 +462,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm, // Check that the map of the object hasn't changed. __ CheckMap(receiver_reg, scratch1, Handle(object->map()), miss_label, - DO_SMI_CHECK, REQUIRE_EXACT_MAP); + DO_SMI_CHECK); // Perform global security token check if needed. if (object->IsJSGlobalProxy()) { @@ -581,6 +581,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm, index -= object->map()->inobject_properties(); // TODO(verwaest): Share this code as a code stub. + SmiCheck smi_check = representation.IsTagged() + ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; if (index < 0) { // Set the property straight into the object. int offset = object->map()->instance_size() + (index * kPointerSize); @@ -606,7 +608,9 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm, name_reg, scratch1, kLRHasNotBeenSaved, - kDontSaveFPRegs); + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + smi_check); } } else { // Write to the properties array. @@ -636,7 +640,9 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm, name_reg, receiver_reg, kLRHasNotBeenSaved, - kDontSaveFPRegs); + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + smi_check); } } @@ -665,7 +671,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Check that the map of the object hasn't changed. __ CheckMap(receiver_reg, scratch1, Handle(object->map()), miss_label, - DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); + DO_SMI_CHECK); // Perform global security token check if needed. if (object->IsJSGlobalProxy()) { @@ -723,6 +729,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, } // TODO(verwaest): Share this code as a code stub. + SmiCheck smi_check = representation.IsTagged() + ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; if (index < 0) { // Set the property straight into the object. int offset = object->map()->instance_size() + (index * kPointerSize); @@ -740,7 +748,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, name_reg, scratch1, kLRHasNotBeenSaved, - kDontSaveFPRegs); + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + smi_check); } } else { // Write to the properties array. @@ -762,7 +772,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, name_reg, receiver_reg, kLRHasNotBeenSaved, - kDontSaveFPRegs); + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + smi_check); } } @@ -881,11 +893,12 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, // -- sp[4] : callee JS function // -- sp[8] : call data // -- sp[12] : isolate - // -- sp[16] : ReturnValue - // -- sp[20] : last JS argument + // -- sp[16] : ReturnValue default value + // -- sp[20] : ReturnValue + // -- sp[24] : last JS argument // -- ... 
- // -- sp[(argc + 4) * 4] : first JS argument - // -- sp[(argc + 5) * 4] : receiver + // -- sp[(argc + 5) * 4] : first JS argument + // -- sp[(argc + 6) * 4] : receiver // ----------------------------------- // Get the function and setup the context. Handle function = optimization.constant_function(); @@ -902,13 +915,14 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, __ Move(r6, call_data); } __ mov(r7, Operand(ExternalReference::isolate_address(masm->isolate()))); - // Store JS function, call data, isolate and ReturnValue. + // Store JS function, call data, isolate ReturnValue default and ReturnValue. __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit()); __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); __ str(r5, MemOperand(sp, 4 * kPointerSize)); + __ str(r5, MemOperand(sp, 5 * kPointerSize)); // Prepare arguments. - __ add(r2, sp, Operand(4 * kPointerSize)); + __ add(r2, sp, Operand(5 * kPointerSize)); // Allocate the v8::Arguments structure in the arguments' space since // it's not controlled by GC. @@ -1247,8 +1261,7 @@ Register StubCompiler::CheckPrototypes(Handle object, if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) { Handle current_map(current->map()); // CheckMap implicitly loads the map of |reg| into |map_reg|. - __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK, - ALLOW_ELEMENT_TRANSITION_MAPS); + __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK); } else { __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); } @@ -1285,7 +1298,7 @@ Register StubCompiler::CheckPrototypes(Handle object, if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) { // Check the holder map. __ CheckMap(reg, scratch1, Handle(holder->map()), miss, - DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); + DONT_DO_SMI_CHECK); } // Perform security check for access to the global object. @@ -1422,10 +1435,12 @@ void BaseLoadStubCompiler::GenerateLoadCallback( __ Move(scratch3(), Handle(callback->data(), isolate())); } __ Push(reg, scratch3()); - __ mov(scratch3(), + __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex); + __ mov(scratch4(), scratch3()); + __ Push(scratch3(), scratch4()); + __ mov(scratch4(), Operand(ExternalReference::isolate_address(isolate()))); - __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex); - __ Push(scratch3(), scratch4(), name()); + __ Push(scratch4(), name()); __ mov(r0, sp); // r0 = Handle const int kApiStackSpace = 1; @@ -1451,7 +1466,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback( __ CallApiFunctionAndReturn(ref, kStackUnwindSpace, returns_handle, - 3); + 5); } @@ -2797,7 +2812,7 @@ Handle StoreStubCompiler::CompileStoreInterceptor( // Check that the map of the object hasn't changed. __ CheckMap(receiver(), scratch1(), Handle(object->map()), &miss, - DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); + DO_SMI_CHECK); // Perform global security token check if needed. if (object->IsJSGlobalProxy()) { @@ -3080,151 +3095,6 @@ Handle KeyedStoreStubCompiler::CompileStorePolymorphic( } -Handle ConstructStubCompiler::CompileConstructStub( - Handle function) { - // ----------- S t a t e ------------- - // -- r0 : argc - // -- r1 : constructor - // -- lr : return address - // -- [sp] : last argument - // ----------------------------------- - Label generic_stub_call; - - // Use r7 for holding undefined which is used in several places below. - __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); - -#ifdef ENABLE_DEBUGGER_SUPPORT - // Check to see whether there are any break points in the function code. 
If - // there are jump to the generic constructor stub which calls the actual - // code for the function thereby hitting the break points. - __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); - __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset)); - __ cmp(r2, r7); - __ b(ne, &generic_stub_call); -#endif - - // Load the initial map and verify that it is in fact a map. - // r1: constructor function - // r7: undefined - __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); - __ JumpIfSmi(r2, &generic_stub_call); - __ CompareObjectType(r2, r3, r4, MAP_TYPE); - __ b(ne, &generic_stub_call); - -#ifdef DEBUG - // Cannot construct functions this way. - // r0: argc - // r1: constructor function - // r2: initial map - // r7: undefined - __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE); - __ Check(ne, "Function constructed by construct stub."); -#endif - - // Now allocate the JSObject in new space. - // r0: argc - // r1: constructor function - // r2: initial map - // r7: undefined - ASSERT(function->has_initial_map()); - __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); -#ifdef DEBUG - int instance_size = function->initial_map()->instance_size(); - __ cmp(r3, Operand(instance_size >> kPointerSizeLog2)); - __ Check(eq, "Instance size of initial map changed."); -#endif - __ Allocate(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS); - - // Allocated the JSObject, now initialize the fields. Map is set to initial - // map and properties and elements are set to empty fixed array. - // r0: argc - // r1: constructor function - // r2: initial map - // r3: object size (in words) - // r4: JSObject (not tagged) - // r7: undefined - __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); - __ mov(r5, r4); - ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); - __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); - ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); - __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); - ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); - __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); - - // Calculate the location of the first argument. The stack contains only the - // argc arguments. - __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2)); - - // Fill all the in-object properties with undefined. - // r0: argc - // r1: first argument - // r3: object size (in words) - // r4: JSObject (not tagged) - // r5: First in-object property of JSObject (not tagged) - // r7: undefined - // Fill the initialized properties with a constant value or a passed argument - // depending on the this.x = ...; assignment in the function. - Handle shared(function->shared()); - for (int i = 0; i < shared->this_property_assignments_count(); i++) { - if (shared->IsThisPropertyAssignmentArgument(i)) { - Label not_passed, next; - // Check if the argument assigned to the property is actually passed. - int arg_number = shared->GetThisPropertyAssignmentArgument(i); - __ cmp(r0, Operand(arg_number)); - __ b(le, ¬_passed); - // Argument passed - find it on the stack. - __ ldr(r2, MemOperand(r1, (arg_number + 1) * -kPointerSize)); - __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); - __ b(&next); - __ bind(¬_passed); - // Set the property to undefined. - __ str(r7, MemOperand(r5, kPointerSize, PostIndex)); - __ bind(&next); - } else { - // Set the property to the constant value. 
- Handle constant(shared->GetThisPropertyAssignmentConstant(i), - isolate()); - __ mov(r2, Operand(constant)); - __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); - } - } - - // Fill the unused in-object property fields with undefined. - for (int i = shared->this_property_assignments_count(); - i < function->initial_map()->inobject_properties(); - i++) { - __ str(r7, MemOperand(r5, kPointerSize, PostIndex)); - } - - // r0: argc - // r4: JSObject (not tagged) - // Move argc to r1 and the JSObject to return to r0 and tag it. - __ mov(r1, r0); - __ mov(r0, r4); - __ orr(r0, r0, Operand(kHeapObjectTag)); - - // r0: JSObject - // r1: argc - // Remove caller arguments and receiver from the stack and return. - __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2)); - __ add(sp, sp, Operand(kPointerSize)); - Counters* counters = isolate()->counters(); - __ IncrementCounter(counters->constructed_objects(), 1, r1, r2); - __ IncrementCounter(counters->constructed_objects_stub(), 1, r1, r2); - __ Jump(lr); - - // Jump to the generic stub in case the specialized code cannot handle the - // construction. - __ bind(&generic_stub_call); - Handle code = isolate()->builtins()->JSConstructStubGeneric(); - __ Jump(code, RelocInfo::CODE_TARGET); - - // Return the generated code. - return GetCode(); -} - - #undef __ #define __ ACCESS_MASM(masm) diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index 599fd5c..5f89ebb 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -395,6 +395,23 @@ function ArrayJoin(separator) { } +function ObservedArrayPop(n) { + n--; + var value = this[n]; + + EnqueueSpliceRecord(this, n, [value], 0); + + try { + BeginPerformSplice(this); + delete this[n]; + this.length = n; + } finally { + EndPerformSplice(this); + } + + return value; +} + // Removes the last element from the array and returns it. See // ECMA-262, section 15.4.4.6. 
function ArrayPop() { @@ -408,6 +425,10 @@ function ArrayPop() { this.length = n; return; } + + if (%IsObserved(this)) + return ObservedArrayPop.call(this, n); + n--; var value = this[n]; delete this[n]; @@ -420,11 +441,10 @@ function ObservedArrayPush() { var n = TO_UINT32(this.length); var m = %_ArgumentsLength(); - EnqueueSpliceRecord(this, n, [], 0, m); + EnqueueSpliceRecord(this, n, [], m); try { BeginPerformSplice(this); - for (var i = 0; i < m; i++) { this[i+n] = %_Arguments(i); } @@ -558,6 +578,22 @@ function ArrayReverse() { } +function ObservedArrayShift(len) { + var first = this[0]; + + EnqueueSpliceRecord(this, 0, [first], 0); + + try { + BeginPerformSplice(this); + SimpleMove(this, 0, 1, len, 0); + this.length = len - 1; + } finally { + EndPerformSplice(this); + } + + return first; +} + function ArrayShift() { if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) { throw MakeTypeError("called_on_null_or_undefined", @@ -571,9 +607,12 @@ function ArrayShift() { return; } + if (%IsObserved(this)) + return ObservedArrayShift.call(this, len); + var first = this[0]; - if (IS_ARRAY(this) && !%IsObserved(this)) { + if (IS_ARRAY(this)) { SmartMove(this, 0, 1, len, 0); } else { SimpleMove(this, 0, 1, len, 0); @@ -584,6 +623,25 @@ function ArrayShift() { return first; } +function ObservedArrayUnshift() { + var len = TO_UINT32(this.length); + var num_arguments = %_ArgumentsLength(); + + EnqueueSpliceRecord(this, 0, [], num_arguments); + + try { + BeginPerformSplice(this); + SimpleMove(this, 0, 0, len, num_arguments); + for (var i = 0; i < num_arguments; i++) { + this[i] = %_Arguments(i); + } + this.length = len + num_arguments; + } finally { + EndPerformSplice(this); + } + + return len + num_arguments; +} function ArrayUnshift(arg1) { // length == 1 if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) { @@ -591,10 +649,13 @@ function ArrayUnshift(arg1) { // length == 1 ["Array.prototype.unshift"]); } + if (%IsObserved(this)) + return ObservedArrayUnshift.apply(this, arguments); + var len = TO_UINT32(this.length); var num_arguments = %_ArgumentsLength(); - if (IS_ARRAY(this) && !%IsObserved(this)) { + if (IS_ARRAY(this)) { SmartMove(this, 0, 0, len, num_arguments); } else { SimpleMove(this, 0, 0, len, num_arguments); @@ -655,52 +716,99 @@ function ArraySlice(start, end) { } -function ArraySplice(start, delete_count) { - if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) { - throw MakeTypeError("called_on_null_or_undefined", - ["Array.prototype.splice"]); - } - - var num_arguments = %_ArgumentsLength(); - - var len = TO_UINT32(this.length); - var start_i = TO_INTEGER(start); - +function ComputeSpliceStartIndex(start_i, len) { if (start_i < 0) { start_i += len; - if (start_i < 0) start_i = 0; - } else { - if (start_i > len) start_i = len; + return start_i < 0 ? 0 : start_i; } + return start_i > len ? len : start_i; +} + + +function ComputeSpliceDeleteCount(delete_count, num_arguments, len, start_i) { // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is // given as a request to delete all the elements from the start. // And it differs from the case of undefined delete count. // This does not follow ECMA-262, but we do the same for // compatibility. 
var del_count = 0; - if (num_arguments == 1) { - del_count = len - start_i; - } else { - del_count = TO_INTEGER(delete_count); - if (del_count < 0) del_count = 0; - if (del_count > len - start_i) del_count = len - start_i; - } + if (num_arguments == 1) + return len - start_i; + + del_count = TO_INTEGER(delete_count); + if (del_count < 0) + return 0; + + if (del_count > len - start_i) + return len - start_i; + + return del_count; +} + +function ObservedArraySplice(start, delete_count) { + var num_arguments = %_ArgumentsLength(); + var len = TO_UINT32(this.length); + var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len); + var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len, + start_i); var deleted_elements = []; deleted_elements.length = del_count; + var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0; + + try { + BeginPerformSplice(this); - // Number of elements to add. - var num_additional_args = 0; - if (num_arguments > 2) { - num_additional_args = num_arguments - 2; + SimpleSlice(this, start_i, del_count, len, deleted_elements); + SimpleMove(this, start_i, del_count, len, num_elements_to_add); + + // Insert the arguments into the resulting array in + // place of the deleted elements. + var i = start_i; + var arguments_index = 2; + var arguments_length = %_ArgumentsLength(); + while (arguments_index < arguments_length) { + this[i++] = %_Arguments(arguments_index++); + } + this.length = len - del_count + num_elements_to_add; + + } finally { + EndPerformSplice(this); + if (deleted_elements.length || num_elements_to_add) { + EnqueueSpliceRecord(this, + start_i, + deleted_elements.slice(), + num_elements_to_add); + } } - var use_simple_splice = true; + // Return the deleted elements. + return deleted_elements; +} + + +function ArraySplice(start, delete_count) { + if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) { + throw MakeTypeError("called_on_null_or_undefined", + ["Array.prototype.splice"]); + } + if (%IsObserved(this)) + return ObservedArraySplice.apply(this, arguments); + + var num_arguments = %_ArgumentsLength(); + var len = TO_UINT32(this.length); + var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len); + var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len, + start_i); + var deleted_elements = []; + deleted_elements.length = del_count; + var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0; + + var use_simple_splice = true; if (IS_ARRAY(this) && - !%IsObserved(this) && - num_additional_args !== del_count) { + num_elements_to_add !== del_count) { // If we are only deleting/moving a few things near the end of the // array then the simple version is going to be faster, because it // doesn't touch most of the array. 
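The two helpers factored out of ArraySplice above are pure integer clamping functions shared with the new ObservedArraySplice. A direct host-side transcription (hypothetical C++ names, mirroring the JavaScript one-to-one) makes the edge cases explicit:

#include <cstdint>

int64_t ComputeSpliceStartIndex(int64_t start_i, int64_t len) {
  // A negative start counts back from the end, clamped to [0, len].
  if (start_i < 0) {
    start_i += len;
    return start_i < 0 ? 0 : start_i;
  }
  return start_i > len ? len : start_i;
}

int64_t ComputeSpliceDeleteCount(int64_t delete_count, int num_arguments,
                                 int64_t len, int64_t start_i) {
  // A lone start argument means "delete through the end of the array".
  if (num_arguments == 1) return len - start_i;
  if (delete_count < 0) return 0;
  return delete_count > len - start_i ? len - start_i : delete_count;
}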
@@ -712,10 +820,10 @@ function ArraySplice(start, delete_count) { if (use_simple_splice) { SimpleSlice(this, start_i, del_count, len, deleted_elements); - SimpleMove(this, start_i, del_count, len, num_additional_args); + SimpleMove(this, start_i, del_count, len, num_elements_to_add); } else { SmartSlice(this, start_i, del_count, len, deleted_elements); - SmartMove(this, start_i, del_count, len, num_additional_args); + SmartMove(this, start_i, del_count, len, num_elements_to_add); } // Insert the arguments into the resulting array in @@ -726,7 +834,7 @@ function ArraySplice(start, delete_count) { while (arguments_index < arguments_length) { this[i++] = %_Arguments(arguments_index++); } - this.length = len - del_count + num_additional_args; + this.length = len - del_count + num_elements_to_add; // Return the deleted elements. return deleted_elements; @@ -1001,11 +1109,13 @@ function ArraySort(comparefn) { max_prototype_element = CopyFromPrototype(this, length); } - var num_non_undefined = %RemoveArrayHoles(this, length); + var num_non_undefined = %IsObserved(this) ? + -1 : %RemoveArrayHoles(this, length); + if (num_non_undefined == -1) { - // There were indexed accessors in the array. Move array holes and - // undefineds to the end using a Javascript function that is safe - // in the presence of accessors. + // The array is observed, or there were indexed accessors in the array. + // Move array holes and undefineds to the end using a Javascript function + // that is safe in the presence of accessors and is observable. num_non_undefined = SafeRemoveArrayHoles(this); } diff --git a/deps/v8/src/assert-scope.h b/deps/v8/src/assert-scope.h new file mode 100644 index 0000000..e2ec542 --- /dev/null +++ b/deps/v8/src/assert-scope.h @@ -0,0 +1,168 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef V8_ASSERT_SCOPE_H_ +#define V8_ASSERT_SCOPE_H_ + +#include "allocation.h" +#include "platform.h" + +namespace v8 { +namespace internal { + +class Isolate; + +enum PerThreadAssertType { + HEAP_ALLOCATION_ASSERT, + HANDLE_ALLOCATION_ASSERT, + HANDLE_DEREFERENCE_ASSERT, + DEFERRED_HANDLE_DEREFERENCE_ASSERT, + LAST_PER_THREAD_ASSERT_TYPE +}; + + +#ifdef DEBUG +class PerThreadAssertData { + public: + PerThreadAssertData() : nesting_level_(0) { + for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; i++) { + assert_states_[i] = true; + } + } + + void set(PerThreadAssertType type, bool allow) { + assert_states_[type] = allow; + } + + bool get(PerThreadAssertType type) const { + return assert_states_[type]; + } + + void increment_level() { ++nesting_level_; } + bool decrement_level() { return --nesting_level_ == 0; } + + private: + bool assert_states_[LAST_PER_THREAD_ASSERT_TYPE]; + int nesting_level_; + + DISALLOW_COPY_AND_ASSIGN(PerThreadAssertData); +}; +#endif // DEBUG + + +class PerThreadAssertScopeBase { +#ifdef DEBUG + + protected: + PerThreadAssertScopeBase() { + data_ = AssertData(); + data_->increment_level(); + } + + ~PerThreadAssertScopeBase() { + if (!data_->decrement_level()) return; + for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; i++) { + ASSERT(data_->get(static_cast(i))); + } + delete data_; + Thread::SetThreadLocal(thread_local_key, NULL); + } + + static PerThreadAssertData* AssertData() { + PerThreadAssertData* data = reinterpret_cast( + Thread::GetThreadLocal(thread_local_key)); + if (data == NULL) { + data = new PerThreadAssertData(); + Thread::SetThreadLocal(thread_local_key, data); + } + return data; + } + + static Thread::LocalStorageKey thread_local_key; + PerThreadAssertData* data_; + friend class Isolate; +#endif // DEBUG +}; + + + +template +class PerThreadAssertScope : public PerThreadAssertScopeBase { + public: +#ifndef DEBUG + PerThreadAssertScope() { } + static void SetIsAllowed(bool is_allowed) { } +#else + PerThreadAssertScope() { + old_state_ = data_->get(type); + data_->set(type, allow); + } + + ~PerThreadAssertScope() { data_->set(type, old_state_); } + + static bool IsAllowed() { return AssertData()->get(type); } + + private: + bool old_state_; +#endif +}; + +// Scope to document where we do not expect handles to be created. +typedef PerThreadAssertScope + DisallowHandleAllocation; + +// Scope to introduce an exception to DisallowHandleAllocation. +typedef PerThreadAssertScope + AllowHandleAllocation; + +// Scope to document where we do not expect any allocation and GC. +typedef PerThreadAssertScope + DisallowHeapAllocation; + +// Scope to introduce an exception to DisallowHeapAllocation. +typedef PerThreadAssertScope + AllowHeapAllocation; + +// Scope to document where we do not expect any handle dereferences. +typedef PerThreadAssertScope + DisallowHandleDereference; + +// Scope to introduce an exception to DisallowHandleDereference. +typedef PerThreadAssertScope + AllowHandleDereference; + +// Scope to document where we do not expect deferred handles to be dereferenced. +typedef PerThreadAssertScope + DisallowDeferredHandleDereference; + +// Scope to introduce an exception to DisallowDeferredHandleDereference. +typedef PerThreadAssertScope + AllowDeferredHandleDereference; + +} } // namespace v8::internal + +#endif // V8_ASSERT_SCOPE_H_ diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index d241355..a5d1e2d 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -30,6 +30,7 @@ #include // For isfinite. 
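The typedefs in the new assert-scope.h pair every Disallow* scope with an Allow* scope that can locally re-enable the operation, and all of them compile away outside DEBUG builds. A usage sketch (illustrative function, not from the patch):

#include "assert-scope.h"

// Illustrative only: code that walks raw object pointers must not trigger
// a GC, so the whole body runs under DisallowHeapAllocation.
void WalkRawPointers() {
  v8::internal::DisallowHeapAllocation no_gc;  // allocation asserts in debug
  // ... raw-pointer work ...
  {
    v8::internal::AllowHeapAllocation allow_gc;  // scoped exception
    // ... a call that may legitimately allocate ...
  }
  // no_gc is in force again from here on; release builds pay nothing.
}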
#include "builtins.h" #include "code-stubs.h" +#include "contexts.h" #include "conversions.h" #include "hashmap.h" #include "parser.h" @@ -181,9 +182,9 @@ LanguageMode FunctionLiteral::language_mode() const { } -ObjectLiteral::Property::Property(Literal* key, - Expression* value, - Isolate* isolate) { +ObjectLiteralProperty::ObjectLiteralProperty(Literal* key, + Expression* value, + Isolate* isolate) { emit_store_ = true; key_ = key; value_ = value; @@ -201,7 +202,8 @@ ObjectLiteral::Property::Property(Literal* key, } -ObjectLiteral::Property::Property(bool is_getter, FunctionLiteral* value) { +ObjectLiteralProperty::ObjectLiteralProperty(bool is_getter, + FunctionLiteral* value) { emit_store_ = true; value_ = value; kind_ = is_getter ? GETTER : SETTER; @@ -415,6 +417,16 @@ bool FunctionDeclaration::IsInlineable() const { // ---------------------------------------------------------------------------- // Recording of type feedback +void ForInStatement::RecordTypeFeedback(TypeFeedbackOracle* oracle) { + for_in_type_ = static_cast(oracle->ForInType(this)); +} + + +void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) { + to_boolean_types_ = oracle->ToBooleanTypes(test_id()); +} + + void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone) { // Record type feedback from the oracle in the AST. @@ -486,6 +498,7 @@ void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle, oracle->CollectKeyedReceiverTypes(id, &receiver_types_); } store_mode_ = oracle->GetStoreMode(id); + type_ = oracle->IncrementType(this); } @@ -575,6 +588,32 @@ bool Call::ComputeGlobalTarget(Handle global, } +Handle Call::GetPrototypeForPrimitiveCheck( + CheckType check, Isolate* isolate) { + v8::internal::Context* native_context = isolate->context()->native_context(); + JSFunction* function = NULL; + switch (check) { + case RECEIVER_MAP_CHECK: + UNREACHABLE(); + break; + case STRING_CHECK: + function = native_context->string_function(); + break; + case SYMBOL_CHECK: + function = native_context->symbol_function(); + break; + case NUMBER_CHECK: + function = native_context->number_function(); + break; + case BOOLEAN_CHECK: + function = native_context->boolean_function(); + break; + } + ASSERT(function != NULL); + return Handle(JSObject::cast(function->instance_prototype())); +} + + void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle, CallKind call_kind) { is_monomorphic_ = oracle->CallIsMonomorphic(this); @@ -606,8 +645,7 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle, map = receiver_types_.at(0); } else { ASSERT(check_type_ != RECEIVER_MAP_CHECK); - holder_ = Handle( - oracle->GetPrototypeForPrimitiveCheck(check_type_)); + holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate()); map = Handle(holder_->map()); } is_monomorphic_ = ComputeTarget(map, name); @@ -617,10 +655,14 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle, void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) { + allocation_info_cell_ = oracle->GetCallNewAllocationInfoCell(this); is_monomorphic_ = oracle->CallNewIsMonomorphic(this); if (is_monomorphic_) { target_ = oracle->GetCallNewTarget(this); - elements_kind_ = oracle->GetCallNewElementsKind(this); + Object* value = allocation_info_cell_->value(); + if (value->IsSmi()) { + elements_kind_ = static_cast(Smi::cast(value)->value()); + } } } @@ -632,6 +674,31 @@ void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) { } +void UnaryOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) { + type_ 
= oracle->UnaryType(this); +} + + +void BinaryOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) { + oracle->BinaryType(this, &left_type_, &right_type_, &result_type_, + &has_fixed_right_arg_, &fixed_right_arg_value_); +} + + +void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) { + oracle->CompareType(this, &left_type_, &right_type_, &overall_type_); + if (!overall_type_.IsUninitialized() && overall_type_.IsNonPrimitive() && + (op_ == Token::EQ || op_ == Token::EQ_STRICT)) { + map_ = oracle->GetCompareMap(this); + } else { + // May be a compare to nil. + map_ = oracle->CompareNilMonomorphicReceiverType(this); + if (op_ != Token::EQ_STRICT) + compare_nil_types_ = oracle->CompareNilTypes(this); + } +} + + // ---------------------------------------------------------------------------- // Implementation of AstVisitor @@ -723,12 +790,12 @@ Interval RegExpQuantifier::CaptureRegisters() { bool RegExpAssertion::IsAnchoredAtStart() { - return type() == RegExpAssertion::START_OF_INPUT; + return assertion_type() == RegExpAssertion::START_OF_INPUT; } bool RegExpAssertion::IsAnchoredAtEnd() { - return type() == RegExpAssertion::END_OF_INPUT; + return assertion_type() == RegExpAssertion::END_OF_INPUT; } @@ -860,7 +927,7 @@ void* RegExpUnparser::VisitCharacterClass(RegExpCharacterClass* that, void* RegExpUnparser::VisitAssertion(RegExpAssertion* that, void* data) { - switch (that->type()) { + switch (that->assertion_type()) { case RegExpAssertion::START_OF_INPUT: stream()->Add("@^i"); break; @@ -1087,6 +1154,7 @@ DONT_SELFOPTIMIZE_NODE(DoWhileStatement) DONT_SELFOPTIMIZE_NODE(WhileStatement) DONT_SELFOPTIMIZE_NODE(ForStatement) DONT_SELFOPTIMIZE_NODE(ForInStatement) +DONT_SELFOPTIMIZE_NODE(ForOfStatement) DONT_CACHE_NODE(ModuleLiteral) @@ -1115,6 +1183,7 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) { Handle Literal::ToString() { if (handle_->IsString()) return Handle::cast(handle_); + Factory* factory = Isolate::Current()->factory(); ASSERT(handle_->IsNumber()); char arr[100]; Vector buffer(arr, ARRAY_SIZE(arr)); @@ -1126,7 +1195,7 @@ Handle Literal::ToString() { } else { str = DoubleToCString(handle_->Number(), buffer); } - return FACTORY->NewStringFromAscii(CStrVector(str)); + return factory->NewStringFromAscii(CStrVector(str)); } diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index ad7b119..219a69b 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -39,6 +39,8 @@ #include "small-pointer-list.h" #include "smart-pointers.h" #include "token.h" +#include "type-info.h" // TODO(rossberg): this should eventually be removed +#include "types.h" #include "utils.h" #include "variables.h" #include "interface.h" @@ -88,6 +90,7 @@ namespace internal { V(WhileStatement) \ V(ForStatement) \ V(ForInStatement) \ + V(ForOfStatement) \ V(TryCatchStatement) \ V(TryFinallyStatement) \ V(DebuggerStatement) @@ -162,9 +165,9 @@ typedef ZoneList > ZoneStringList; typedef ZoneList > ZoneObjectList; -#define DECLARE_NODE_TYPE(type) \ - virtual void Accept(AstVisitor* v); \ - virtual AstNode::Type node_type() const { return AstNode::k##type; } \ +#define DECLARE_NODE_TYPE(type) \ + virtual void Accept(AstVisitor* v); \ + virtual AstNode::NodeType node_type() const { return AstNode::k##type; } \ template friend class AstNodeFactory; @@ -196,7 +199,7 @@ class AstProperties BASE_EMBEDDED { class AstNode: public ZoneObject { public: #define DECLARE_TYPE_ENUM(type) k##type, - enum Type { + enum NodeType { AST_NODE_LIST(DECLARE_TYPE_ENUM) kInvalid = -1 }; @@ -211,7 +214,7 @@ 
class AstNode: public ZoneObject { virtual ~AstNode() { } virtual void Accept(AstVisitor* v) = 0; - virtual Type node_type() const = 0; + virtual NodeType node_type() const = 0; // Type testing & conversion functions overridden by concrete subclasses. #define DECLARE_NODE_FUNCTIONS(type) \ @@ -353,6 +356,9 @@ class Expression: public AstNode { // True iff the expression is the undefined literal. bool IsUndefinedLiteral(); + // Expression type + Handle type() { return type_; } + // Type feedback information for assignments and properties. virtual bool IsMonomorphic() { UNREACHABLE(); @@ -373,15 +379,23 @@ class Expression: public AstNode { return STANDARD_STORE; } + // TODO(rossberg): this should move to its own AST node eventually. + void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle); + byte to_boolean_types() const { return to_boolean_types_; } + BailoutId id() const { return id_; } TypeFeedbackId test_id() const { return test_id_; } protected: explicit Expression(Isolate* isolate) - : id_(GetNextId(isolate)), + : type_(Type::Any(), isolate), + id_(GetNextId(isolate)), test_id_(GetNextId(isolate)) {} private: + Handle type_; + byte to_boolean_types_; + const BailoutId id_; const TypeFeedbackId test_id_; }; @@ -389,7 +403,7 @@ class Expression: public AstNode { class BreakableStatement: public Statement { public: - enum Type { + enum BreakableType { TARGET_FOR_ANONYMOUS, TARGET_FOR_NAMED_ONLY }; @@ -405,15 +419,18 @@ class BreakableStatement: public Statement { Label* break_target() { return &break_target_; } // Testers. - bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; } + bool is_target_for_anonymous() const { + return breakable_type_ == TARGET_FOR_ANONYMOUS; + } BailoutId EntryId() const { return entry_id_; } BailoutId ExitId() const { return exit_id_; } protected: - BreakableStatement(Isolate* isolate, ZoneStringList* labels, Type type) + BreakableStatement( + Isolate* isolate, ZoneStringList* labels, BreakableType breakable_type) : labels_(labels), - type_(type), + breakable_type_(breakable_type), entry_id_(GetNextId(isolate)), exit_id_(GetNextId(isolate)) { ASSERT(labels == NULL || labels->length() > 0); @@ -422,7 +439,7 @@ class BreakableStatement: public Statement { private: ZoneStringList* labels_; - Type type_; + BreakableType breakable_type_; Label break_target_; const BailoutId entry_id_; const BailoutId exit_id_; @@ -716,6 +733,7 @@ class IterationStatement: public BreakableStatement { private: Statement* body_; Label continue_target_; + const BailoutId osr_entry_id_; }; @@ -751,7 +769,9 @@ class DoWhileStatement: public IterationStatement { private: Expression* cond_; + int condition_position_; + const BailoutId continue_id_; const BailoutId back_edge_id_; }; @@ -788,8 +808,10 @@ class WhileStatement: public IterationStatement { private: Expression* cond_; + // True if there is a function literal subexpression in the condition. bool may_have_function_literal_; + const BailoutId body_id_; }; @@ -843,51 +865,142 @@ class ForStatement: public IterationStatement { Statement* init_; Expression* cond_; Statement* next_; + // True if there is a function literal subexpression in the condition. 
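The systematic renames above and below (Type becomes NodeType, and later BreakableType, FunctionType, AssertionType) are presumably forced by the new types.h include: it brings a class named Type into v8::internal, and a nested enum of the same name would shadow it. A minimal illustration of the clash (stand-in classes, not V8 code):

class Type {};  // stand-in for the semantic-type class from types.h

template <typename T> class Handle {};

class AstNode {
 public:
  enum NodeType { kInvalid = -1 };  // renamed from "Type"
  // Had the enum kept the name "Type", the member below would name
  // Handle<AstNode::Type>, i.e. the enum, rather than the class above.
  Handle<Type> type_;
};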
bool may_have_function_literal_; Variable* loop_variable_; + const BailoutId continue_id_; const BailoutId body_id_; }; -class ForInStatement: public IterationStatement { +class ForEachStatement: public IterationStatement { public: - DECLARE_NODE_TYPE(ForInStatement) + enum VisitMode { + ENUMERATE, // for (each in subject) body; + ITERATE // for (each of subject) body; + }; - void Initialize(Expression* each, Expression* enumerable, Statement* body) { + void Initialize(Expression* each, Expression* subject, Statement* body) { IterationStatement::Initialize(body); each_ = each; - enumerable_ = enumerable; + subject_ = subject; } Expression* each() const { return each_; } - Expression* enumerable() const { return enumerable_; } + Expression* subject() const { return subject_; } - virtual BailoutId ContinueId() const { return EntryId(); } - virtual BailoutId StackCheckId() const { return body_id_; } - BailoutId BodyId() const { return body_id_; } - BailoutId PrepareId() const { return prepare_id_; } + protected: + ForEachStatement(Isolate* isolate, ZoneStringList* labels) + : IterationStatement(isolate, labels), + each_(NULL), + subject_(NULL) { + } + + private: + Expression* each_; + Expression* subject_; +}; + + +class ForInStatement: public ForEachStatement { + public: + DECLARE_NODE_TYPE(ForInStatement) + + Expression* enumerable() const { + return subject(); + } TypeFeedbackId ForInFeedbackId() const { return reuse(PrepareId()); } + void RecordTypeFeedback(TypeFeedbackOracle* oracle); + enum ForInType { FAST_FOR_IN, SLOW_FOR_IN }; + ForInType for_in_type() const { return for_in_type_; } + + BailoutId BodyId() const { return body_id_; } + BailoutId PrepareId() const { return prepare_id_; } + virtual BailoutId ContinueId() const { return EntryId(); } + virtual BailoutId StackCheckId() const { return body_id_; } protected: ForInStatement(Isolate* isolate, ZoneStringList* labels) - : IterationStatement(isolate, labels), - each_(NULL), - enumerable_(NULL), + : ForEachStatement(isolate, labels), + for_in_type_(SLOW_FOR_IN), body_id_(GetNextId(isolate)), prepare_id_(GetNextId(isolate)) { } - private: - Expression* each_; - Expression* enumerable_; + ForInType for_in_type_; const BailoutId body_id_; const BailoutId prepare_id_; }; +class ForOfStatement: public ForEachStatement { + public: + DECLARE_NODE_TYPE(ForOfStatement) + + void Initialize(Expression* each, + Expression* subject, + Statement* body, + Expression* assign_iterator, + Expression* next_result, + Expression* result_done, + Expression* assign_each) { + ForEachStatement::Initialize(each, subject, body); + assign_iterator_ = assign_iterator; + next_result_ = next_result; + result_done_ = result_done; + assign_each_ = assign_each; + } + + Expression* iterable() const { + return subject(); + } + + // var iterator = iterable; + Expression* assign_iterator() const { + return assign_iterator_; + } + + // var result = iterator.next(); + Expression* next_result() const { + return next_result_; + } + + // result.done + Expression* result_done() const { + return result_done_; + } + + // each = result.value + Expression* assign_each() const { + return assign_each_; + } + + virtual BailoutId ContinueId() const { return EntryId(); } + virtual BailoutId StackCheckId() const { return BackEdgeId(); } + + BailoutId BackEdgeId() const { return back_edge_id_; } + + protected: + ForOfStatement(Isolate* isolate, ZoneStringList* labels) + : ForEachStatement(isolate, labels), + assign_iterator_(NULL), + next_result_(NULL), + result_done_(NULL), + 
assign_each_(NULL), + back_edge_id_(GetNextId(isolate)) { + } + + Expression* assign_iterator_; + Expression* next_result_; + Expression* result_done_; + Expression* assign_each_; + const BailoutId back_edge_id_; +}; + + class ExpressionStatement: public Statement { public: DECLARE_NODE_TYPE(ExpressionStatement) @@ -1023,11 +1136,16 @@ class SwitchStatement: public BreakableStatement { void Initialize(Expression* tag, ZoneList* cases) { tag_ = tag; cases_ = cases; + switch_type_ = UNKNOWN_SWITCH; } Expression* tag() const { return tag_; } ZoneList* cases() const { return cases_; } + enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH, GENERIC_SWITCH }; + SwitchType switch_type() const { return switch_type_; } + void set_switch_type(SwitchType switch_type) { switch_type_ = switch_type; } + protected: SwitchStatement(Isolate* isolate, ZoneStringList* labels) : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS), @@ -1037,6 +1155,7 @@ class SwitchStatement: public BreakableStatement { private: Expression* tag_; ZoneList* cases_; + SwitchType switch_type_; }; @@ -1096,7 +1215,7 @@ class TargetCollector: public AstNode { // Virtual behaviour. TargetCollectors are never part of the AST. virtual void Accept(AstVisitor* v) { UNREACHABLE(); } - virtual Type node_type() const { return kInvalid; } + virtual NodeType node_type() const { return kInvalid; } virtual TargetCollector* AsTargetCollector() { return this; } ZoneList* targets() { return &targets_; } @@ -1282,52 +1401,55 @@ class MaterializedLiteral: public Expression { }; +// Property is used for passing information +// about an object literal's properties from the parser +// to the code generator. +class ObjectLiteralProperty: public ZoneObject { + public: + enum Kind { + CONSTANT, // Property with constant value (compile time). + COMPUTED, // Property with computed value (execution time). + MATERIALIZED_LITERAL, // Property value is a materialized literal. + GETTER, SETTER, // Property is an accessor function. + PROTOTYPE // Property is __proto__. + }; + + ObjectLiteralProperty(Literal* key, Expression* value, Isolate* isolate); + + Literal* key() { return key_; } + Expression* value() { return value_; } + Kind kind() { return kind_; } + + // Type feedback information. + void RecordTypeFeedback(TypeFeedbackOracle* oracle); + bool IsMonomorphic() { return !receiver_type_.is_null(); } + Handle GetReceiverType() { return receiver_type_; } + + bool IsCompileTimeValue(); + + void set_emit_store(bool emit_store); + bool emit_store(); + + protected: + template friend class AstNodeFactory; + + ObjectLiteralProperty(bool is_getter, FunctionLiteral* value); + void set_key(Literal* key) { key_ = key; } + + private: + Literal* key_; + Expression* value_; + Kind kind_; + bool emit_store_; + Handle receiver_type_; +}; + + // An object literal has a boilerplate object that is used // for minimizing the work when constructing it at runtime. class ObjectLiteral: public MaterializedLiteral { public: - // Property is used for passing information - // about an object literal's properties from the parser - // to the code generator. - class Property: public ZoneObject { - public: - enum Kind { - CONSTANT, // Property with constant value (compile time). - COMPUTED, // Property with computed value (execution time). - MATERIALIZED_LITERAL, // Property value is a materialized literal. - GETTER, SETTER, // Property is an accessor function. - PROTOTYPE // Property is __proto__. 
- }; - - Property(Literal* key, Expression* value, Isolate* isolate); - - Literal* key() { return key_; } - Expression* value() { return value_; } - Kind kind() { return kind_; } - - // Type feedback information. - void RecordTypeFeedback(TypeFeedbackOracle* oracle); - bool IsMonomorphic() { return !receiver_type_.is_null(); } - Handle GetReceiverType() { return receiver_type_; } - - bool IsCompileTimeValue(); - - void set_emit_store(bool emit_store); - bool emit_store(); - - protected: - template friend class AstNodeFactory; - - Property(bool is_getter, FunctionLiteral* value); - void set_key(Literal* key) { key_ = key; } - - private: - Literal* key_; - Expression* value_; - Kind kind_; - bool emit_store_; - Handle receiver_type_; - }; + typedef ObjectLiteralProperty Property; DECLARE_NODE_TYPE(ObjectLiteral) @@ -1590,6 +1712,11 @@ class Call: public Expression { BailoutId ReturnId() const { return return_id_; } + // TODO(rossberg): this should really move somewhere else (and be merged with + // various similar methods in objets.cc), but for now... + static Handle GetPrototypeForPrimitiveCheck( + CheckType check, Isolate* isolate); + #ifdef DEBUG // Used to assert that the FullCodeGenerator records the return site. bool return_is_recorded_; @@ -1636,10 +1763,13 @@ class CallNew: public Expression { TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); } void RecordTypeFeedback(TypeFeedbackOracle* oracle); virtual bool IsMonomorphic() { return is_monomorphic_; } - Handle target() { return target_; } + Handle target() const { return target_; } + ElementsKind elements_kind() const { return elements_kind_; } + Handle allocation_info_cell() const { + return allocation_info_cell_; + } BailoutId ReturnId() const { return return_id_; } - ElementsKind elements_kind() const { return elements_kind_; } protected: CallNew(Isolate* isolate, @@ -1651,8 +1781,8 @@ class CallNew: public Expression { arguments_(arguments), pos_(pos), is_monomorphic_(false), - return_id_(GetNextId(isolate)), - elements_kind_(GetInitialFastElementsKind()) { } + elements_kind_(GetInitialFastElementsKind()), + return_id_(GetNextId(isolate)) { } private: Expression* expression_; @@ -1661,9 +1791,10 @@ class CallNew: public Expression { bool is_monomorphic_; Handle target_; + ElementsKind elements_kind_; + Handle allocation_info_cell_; const BailoutId return_id_; - ElementsKind elements_kind_; }; @@ -1713,6 +1844,8 @@ class UnaryOperation: public Expression { BailoutId MaterializeFalseId() { return materialize_false_id_; } TypeFeedbackId UnaryOperationFeedbackId() const { return reuse(id()); } + void RecordTypeFeedback(TypeFeedbackOracle* oracle); + TypeInfo type() const { return type_; } protected: UnaryOperation(Isolate* isolate, @@ -1733,6 +1866,8 @@ class UnaryOperation: public Expression { Expression* expression_; int pos_; + TypeInfo type_; + // For unary not (Token::NOT), the AST ids where true and false will // actually be materialized, respectively. 
const BailoutId materialize_true_id_; @@ -1754,6 +1889,12 @@ class BinaryOperation: public Expression { BailoutId RightId() const { return right_id_; } TypeFeedbackId BinaryOperationFeedbackId() const { return reuse(id()); } + void RecordTypeFeedback(TypeFeedbackOracle* oracle); + TypeInfo left_type() const { return left_type_; } + TypeInfo right_type() const { return right_type_; } + TypeInfo result_type() const { return result_type_; } + bool has_fixed_right_arg() const { return has_fixed_right_arg_; } + int fixed_right_arg_value() const { return fixed_right_arg_value_; } protected: BinaryOperation(Isolate* isolate, @@ -1775,6 +1916,13 @@ class BinaryOperation: public Expression { Expression* left_; Expression* right_; int pos_; + + TypeInfo left_type_; + TypeInfo right_type_; + TypeInfo result_type_; + bool has_fixed_right_arg_; + int fixed_right_arg_value_; + // The short-circuit logical operations need an AST ID for their // right-hand subexpression. const BailoutId right_id_; @@ -1804,6 +1952,7 @@ class CountOperation: public Expression { virtual KeyedAccessStoreMode GetStoreMode() { return store_mode_; } + TypeInfo type() const { return type_; } BailoutId AssignmentId() const { return assignment_id_; } @@ -1832,6 +1981,8 @@ class CountOperation: public Expression { bool is_monomorphic_ : 1; KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed, // must have extra bit. + TypeInfo type_; + Expression* expression_; int pos_; const BailoutId assignment_id_; @@ -1851,6 +2002,12 @@ class CompareOperation: public Expression { // Type feedback information. TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); } + void RecordTypeFeedback(TypeFeedbackOracle* oracle); + TypeInfo left_type() const { return left_type_; } + TypeInfo right_type() const { return right_type_; } + TypeInfo overall_type() const { return overall_type_; } + byte compare_nil_types() const { return compare_nil_types_; } + Handle map() const { return map_; } // Match special cases. 
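BinaryOperation above grows has_fixed_right_arg_/fixed_right_arg_value_ feedback. A sketch of the encoding this enables, assuming (from the can_encode_arg_value helper later in this patch) that a fixed right operand is only remembered when it is a positive power of two, so it can be stored as its exponent in a few key bits:

#include <cassert>
#include <cstdint>

static bool CanEncode(int32_t value) {
  return value > 0 && (value & (value - 1)) == 0;  // positive power of two
}

static int Encode(int32_t value) {
  assert(CanEncode(value));
  int bits = 0;
  while ((value >>= 1) != 0) bits++;               // log2(value)
  return bits;
}

static int32_t Decode(int bits) {
  return static_cast<int32_t>(1) << bits;          // inverse of Encode
}

int main() {
  assert(Decode(Encode(8)) == 8);  // 8 round-trips as exponent 3
  assert(!CanEncode(6));           // non-powers of two are not remembered
  return 0;
}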
bool IsLiteralCompareTypeof(Expression** expr, Handle* check); @@ -1876,6 +2033,12 @@ class CompareOperation: public Expression { Expression* left_; Expression* right_; int pos_; + + TypeInfo left_type_; + TypeInfo right_type_; + TypeInfo overall_type_; + byte compare_nil_types_; + Handle map_; }; @@ -2048,7 +2211,7 @@ class Throw: public Expression { class FunctionLiteral: public Expression { public: - enum Type { + enum FunctionType { ANONYMOUS_EXPRESSION, NAMED_EXPRESSION, DECLARATION @@ -2092,12 +2255,6 @@ class FunctionLiteral: public Expression { int materialized_literal_count() { return materialized_literal_count_; } int expected_property_count() { return expected_property_count_; } int handler_count() { return handler_count_; } - bool has_only_simple_this_property_assignments() { - return HasOnlySimpleThisPropertyAssignments::decode(bitfield_); - } - Handle this_property_assignments() { - return this_property_assignments_; - } int parameter_count() { return parameter_count_; } bool AllowsLazyCompilation(); @@ -2152,10 +2309,8 @@ class FunctionLiteral: public Expression { int materialized_literal_count, int expected_property_count, int handler_count, - bool has_only_simple_this_property_assignments, - Handle this_property_assignments, int parameter_count, - Type type, + FunctionType function_type, ParameterFlag has_duplicate_parameters, IsFunctionFlag is_function, IsParenthesizedFlag is_parenthesized, @@ -2164,7 +2319,6 @@ class FunctionLiteral: public Expression { name_(name), scope_(scope), body_(body), - this_property_assignments_(this_property_assignments), inferred_name_(isolate->factory()->empty_string()), materialized_literal_count_(materialized_literal_count), expected_property_count_(expected_property_count), @@ -2172,10 +2326,8 @@ class FunctionLiteral: public Expression { parameter_count_(parameter_count), function_token_position_(RelocInfo::kNoPosition) { bitfield_ = - HasOnlySimpleThisPropertyAssignments::encode( - has_only_simple_this_property_assignments) | - IsExpression::encode(type != DECLARATION) | - IsAnonymous::encode(type == ANONYMOUS_EXPRESSION) | + IsExpression::encode(function_type != DECLARATION) | + IsAnonymous::encode(function_type == ANONYMOUS_EXPRESSION) | Pretenure::encode(false) | HasDuplicateParameters::encode(has_duplicate_parameters) | IsFunction::encode(is_function) | @@ -2187,7 +2339,6 @@ class FunctionLiteral: public Expression { Handle name_; Scope* scope_; ZoneList* body_; - Handle this_property_assignments_; Handle inferred_name_; AstProperties ast_properties_; @@ -2198,14 +2349,13 @@ class FunctionLiteral: public Expression { int function_token_position_; unsigned bitfield_; - class HasOnlySimpleThisPropertyAssignments: public BitField {}; - class IsExpression: public BitField {}; - class IsAnonymous: public BitField {}; - class Pretenure: public BitField {}; - class HasDuplicateParameters: public BitField {}; - class IsFunction: public BitField {}; - class IsParenthesized: public BitField {}; - class IsGenerator: public BitField {}; + class IsExpression: public BitField {}; + class IsAnonymous: public BitField {}; + class Pretenure: public BitField {}; + class HasDuplicateParameters: public BitField {}; + class IsFunction: public BitField {}; + class IsParenthesized: public BitField {}; + class IsGenerator: public BitField {}; }; @@ -2323,7 +2473,7 @@ class RegExpAlternative: public RegExpTree { class RegExpAssertion: public RegExpTree { public: - enum Type { + enum AssertionType { START_OF_LINE, START_OF_INPUT, END_OF_LINE, @@ -2331,7 
+2481,7 @@ class RegExpAssertion: public RegExpTree { BOUNDARY, NON_BOUNDARY }; - explicit RegExpAssertion(Type type) : type_(type) { } + explicit RegExpAssertion(AssertionType type) : assertion_type_(type) { } virtual void* Accept(RegExpVisitor* visitor, void* data); virtual RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success); @@ -2341,9 +2491,9 @@ class RegExpAssertion: public RegExpTree { virtual bool IsAnchoredAtEnd(); virtual int min_match() { return 0; } virtual int max_match() { return 0; } - Type type() { return type_; } + AssertionType assertion_type() { return assertion_type_; } private: - Type type_; + AssertionType assertion_type_; }; @@ -2456,13 +2606,13 @@ class RegExpText: public RegExpTree { class RegExpQuantifier: public RegExpTree { public: - enum Type { GREEDY, NON_GREEDY, POSSESSIVE }; - RegExpQuantifier(int min, int max, Type type, RegExpTree* body) + enum QuantifierType { GREEDY, NON_GREEDY, POSSESSIVE }; + RegExpQuantifier(int min, int max, QuantifierType type, RegExpTree* body) : body_(body), min_(min), max_(max), min_match_(min * body->min_match()), - type_(type) { + quantifier_type_(type) { if (max > 0 && body->max_match() > kInfinity / max) { max_match_ = kInfinity; } else { @@ -2486,9 +2636,9 @@ class RegExpQuantifier: public RegExpTree { virtual int max_match() { return max_match_; } int min() { return min_; } int max() { return max_; } - bool is_possessive() { return type_ == POSSESSIVE; } - bool is_non_greedy() { return type_ == NON_GREEDY; } - bool is_greedy() { return type_ == GREEDY; } + bool is_possessive() { return quantifier_type_ == POSSESSIVE; } + bool is_non_greedy() { return quantifier_type_ == NON_GREEDY; } + bool is_greedy() { return quantifier_type_ == GREEDY; } RegExpTree* body() { return body_; } private: @@ -2497,7 +2647,7 @@ class RegExpQuantifier: public RegExpTree { int max_; int min_match_; int max_match_; - Type type_; + QuantifierType quantifier_type_; }; @@ -2788,10 +2938,25 @@ class AstNodeFactory BASE_EMBEDDED { STATEMENT_WITH_LABELS(DoWhileStatement) STATEMENT_WITH_LABELS(WhileStatement) STATEMENT_WITH_LABELS(ForStatement) - STATEMENT_WITH_LABELS(ForInStatement) STATEMENT_WITH_LABELS(SwitchStatement) #undef STATEMENT_WITH_LABELS + ForEachStatement* NewForEachStatement(ForEachStatement::VisitMode visit_mode, + ZoneStringList* labels) { + switch (visit_mode) { + case ForEachStatement::ENUMERATE: { + ForInStatement* stmt = new(zone_) ForInStatement(isolate_, labels); + VISIT_AND_RETURN(ForInStatement, stmt); + } + case ForEachStatement::ITERATE: { + ForOfStatement* stmt = new(zone_) ForOfStatement(isolate_, labels); + VISIT_AND_RETURN(ForOfStatement, stmt); + } + } + UNREACHABLE(); + return NULL; + } + ModuleStatement* NewModuleStatement(VariableProxy* proxy, Block* body) { ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body); VISIT_AND_RETURN(ModuleStatement, stmt) @@ -3028,19 +3193,16 @@ class AstNodeFactory BASE_EMBEDDED { int materialized_literal_count, int expected_property_count, int handler_count, - bool has_only_simple_this_property_assignments, - Handle this_property_assignments, int parameter_count, FunctionLiteral::ParameterFlag has_duplicate_parameters, - FunctionLiteral::Type type, + FunctionLiteral::FunctionType function_type, FunctionLiteral::IsFunctionFlag is_function, FunctionLiteral::IsParenthesizedFlag is_parenthesized, FunctionLiteral::IsGeneratorFlag is_generator) { FunctionLiteral* lit = new(zone_) FunctionLiteral( isolate_, name, scope, body, materialized_literal_count, 
expected_property_count, handler_count, - has_only_simple_this_property_assignments, this_property_assignments, - parameter_count, type, has_duplicate_parameters, is_function, + parameter_count, function_type, has_duplicate_parameters, is_function, is_parenthesized, is_generator); // Top-level literal doesn't count for the AST's properties. if (is_function == FunctionLiteral::kIsFunction) { diff --git a/deps/v8/src/atomicops_internals_mips_gcc.h b/deps/v8/src/atomicops_internals_mips_gcc.h index 9498fd7..cb8f8b9 100644 --- a/deps/v8/src/atomicops_internals_mips_gcc.h +++ b/deps/v8/src/atomicops_internals_mips_gcc.h @@ -30,8 +30,6 @@ #ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_ #define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_ -#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") - namespace v8 { namespace internal { @@ -111,9 +109,9 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment) { - ATOMICOPS_COMPILER_BARRIER(); + MemoryBarrier(); Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment); - ATOMICOPS_COMPILER_BARRIER(); + MemoryBarrier(); return res; } @@ -126,19 +124,16 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { - ATOMICOPS_COMPILER_BARRIER(); Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); - ATOMICOPS_COMPILER_BARRIER(); + MemoryBarrier(); return res; } inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { - ATOMICOPS_COMPILER_BARRIER(); - Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); - ATOMICOPS_COMPILER_BARRIER(); - return res; + MemoryBarrier(); + return NoBarrier_CompareAndSwap(ptr, old_value, new_value); } inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { @@ -176,6 +171,4 @@ inline Atomic32 Release_Load(volatile const Atomic32* ptr) { } } // namespace v8::internal -#undef ATOMICOPS_COMPILER_BARRIER - #endif // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_ diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index 7c9e436..a51a9b1 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -1086,11 +1086,13 @@ bool Genesis::InitializeGlobal(Handle inner_global, CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes( result, factory->length_string(), - factory->undefined_value(), DONT_ENUM)); + factory->undefined_value(), DONT_ENUM, + Object::FORCE_TAGGED)); CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes( result, factory->callee_string(), - factory->undefined_value(), DONT_ENUM)); + factory->undefined_value(), DONT_ENUM, + Object::FORCE_TAGGED)); #ifdef DEBUG LookupResult lookup(isolate); @@ -1320,10 +1322,11 @@ void Genesis::InitializeExperimentalGlobal() { if (FLAG_harmony_array_buffer) { // -- A r r a y B u f f e r Handle array_buffer_fun = - InstallFunction(global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE, - JSArrayBuffer::kSize, - isolate()->initial_object_prototype(), - Builtins::kIllegal, true, true); + InstallFunction( + global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE, + JSArrayBuffer::kSizeWithInternalFields, + isolate()->initial_object_prototype(), + Builtins::kIllegal, true, true); native_context()->set_array_buffer_fun(*array_buffer_fun); } @@ -1574,6 +1577,11 @@ void Genesis::InstallExperimentalNativeFunctions() { } if (FLAG_harmony_observation) { 
INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change); + INSTALL_NATIVE(JSFunction, "EnqueueSpliceRecord", observers_enqueue_splice); + INSTALL_NATIVE(JSFunction, "BeginPerformSplice", + observers_begin_perform_splice); + INSTALL_NATIVE(JSFunction, "EndPerformSplice", + observers_end_perform_splice); INSTALL_NATIVE(JSFunction, "DeliverChangeRecords", observers_deliver_changes); } @@ -1604,19 +1612,23 @@ Handle Genesis::InstallInternalArray( factory()->NewJSObject(isolate()->object_function(), TENURED); SetPrototype(array_function, prototype); - array_function->shared()->set_construct_stub( - isolate()->builtins()->builtin(Builtins::kCommonArrayConstructCode)); + if (FLAG_optimize_constructed_arrays) { + InternalArrayConstructorStub internal_array_constructor_stub(isolate()); + Handle code = internal_array_constructor_stub.GetCode(isolate()); + array_function->shared()->set_construct_stub(*code); + } else { + array_function->shared()->set_construct_stub( + isolate()->builtins()->builtin(Builtins::kCommonArrayConstructCode)); + } array_function->shared()->DontAdaptArguments(); - MaybeObject* maybe_map = array_function->initial_map()->Copy(); - Map* new_map; - if (!maybe_map->To(&new_map)) return Handle::null(); - new_map->set_elements_kind(elements_kind); - array_function->set_initial_map(new_map); + Handle original_map(array_function->initial_map()); + Handle initial_map = factory()->CopyMap(original_map); + initial_map->set_elements_kind(elements_kind); + array_function->set_initial_map(*initial_map); // Make "length" magic on instances. - Handle initial_map(array_function->initial_map()); Handle array_descriptors( factory()->NewDescriptorArray(0, 1)); DescriptorArray::WhitenessWitness witness(*array_descriptors); @@ -1870,14 +1882,11 @@ bool Genesis::InstallNatives() { { Handle array_function = InstallInternalArray(builtins, "InternalArray", FAST_HOLEY_ELEMENTS); - if (array_function.is_null()) return false; native_context()->set_internal_array_function(*array_function); } { - Handle array_function = - InstallInternalArray(builtins, "InternalPackedArray", FAST_ELEMENTS); - if (array_function.is_null()) return false; + InstallInternalArray(builtins, "InternalPackedArray", FAST_ELEMENTS); } if (FLAG_disable_native_files) { @@ -2129,7 +2138,8 @@ void Genesis::InstallJSFunctionResultCaches() { #undef F ; - Handle caches = FACTORY->NewFixedArray(kNumberOfCaches, TENURED); + Handle caches = + factory()->NewFixedArray(kNumberOfCaches, TENURED); int index = 0; @@ -2148,7 +2158,7 @@ void Genesis::InstallJSFunctionResultCaches() { void Genesis::InitializeNormalizedMapCaches() { Handle array( - FACTORY->NewFixedArray(NormalizedMapCache::kEntries, TENURED)); + factory()->NewFixedArray(NormalizedMapCache::kEntries, TENURED)); native_context()->set_normalized_map_cache(NormalizedMapCache::cast(*array)); } @@ -2508,14 +2518,13 @@ void Genesis::TransferIndexedProperties(Handle from, // Cloning the elements array is sufficient. Handle from_elements = Handle(FixedArray::cast(from->elements())); - Handle to_elements = FACTORY->CopyFixedArray(from_elements); + Handle to_elements = factory()->CopyFixedArray(from_elements); to->set_elements(*to_elements); } void Genesis::TransferObject(Handle from, Handle to) { HandleScope outer(isolate()); - Factory* factory = isolate()->factory(); ASSERT(!from->IsJSArray()); ASSERT(!to->IsJSArray()); @@ -2525,7 +2534,7 @@ void Genesis::TransferObject(Handle from, Handle to) { // Transfer the prototype (new map is needed). 
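Several bootstrapper hunks above replace the process-global FACTORY accessor with the factory() of the isolate already in hand. A sketch of that cleanup under stand-in types (Factory, Isolate and NewStringArray here are illustrative, not V8's API):

#include <cstddef>
#include <string>
#include <vector>

struct Factory {
  std::vector<std::string>* NewStringArray(size_t n) {
    return new std::vector<std::string>(n);
  }
};

struct Isolate {
  Factory* factory() { return &factory_; }
  Factory factory_;
};

// Before: a global FACTORY macro hid which isolate was allocating.
// After: allocation is explicitly tied to the caller's isolate, which
// stays correct when several isolates coexist in one process.
std::vector<std::string>* MakeCache(Isolate* isolate, size_t entries) {
  return isolate->factory()->NewStringArray(entries);
}

int main() {
  Isolate isolate;
  std::vector<std::string>* cache = MakeCache(&isolate, 4);
  delete cache;
  return 0;
}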
Handle old_to_map = Handle(to->map()); - Handle new_to_map = factory->CopyMap(old_to_map); + Handle new_to_map = factory()->CopyMap(old_to_map); new_to_map->set_prototype(from->map()->prototype()); to->set_map(*new_to_map); } diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h index 476ac12..3097800 100644 --- a/deps/v8/src/bootstrapper.h +++ b/deps/v8/src/bootstrapper.h @@ -65,13 +65,14 @@ class SourceCodeCache BASE_EMBEDDED { } void Add(Vector name, Handle shared) { - HandleScope scope(shared->GetIsolate()); + Isolate* isolate = shared->GetIsolate(); + Factory* factory = isolate->factory(); + HandleScope scope(isolate); int length = cache_->length(); - Handle new_array = - FACTORY->NewFixedArray(length + 2, TENURED); + Handle new_array = factory->NewFixedArray(length + 2, TENURED); cache_->CopyTo(0, *new_array, 0, cache_->length()); cache_ = *new_array; - Handle str = FACTORY->NewStringFromAscii(name, TENURED); + Handle str = factory->NewStringFromAscii(name, TENURED); cache_->set(length, *str); cache_->set(length + 1, *shared); Script::cast(shared->script())->set_type(Smi::FromInt(type_)); diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index 81b6005..d97a477 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -194,64 +194,6 @@ BUILTIN(EmptyFunction) { } -RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure) { - // If we get 2 arguments then they are the stub parameters (constructor, type - // info). If we get 3, then the first one is a pointer to the arguments - // passed by the caller. - Arguments empty_args(0, NULL); - bool no_caller_args = args.length() == 2; - ASSERT(no_caller_args || args.length() == 3); - int parameters_start = no_caller_args ? 0 : 1; - Arguments* caller_args = no_caller_args - ? &empty_args - : reinterpret_cast(args[0]); - Handle constructor = args.at(parameters_start); - Handle type_info = args.at(parameters_start + 1); - - bool holey = false; - if (caller_args->length() == 1 && (*caller_args)[0]->IsSmi()) { - int value = Smi::cast((*caller_args)[0])->value(); - holey = (value > 0 && value < JSObject::kInitialMaxFastElementArray); - } - - JSArray* array; - MaybeObject* maybe_array; - if (*type_info != isolate->heap()->undefined_value() && - JSGlobalPropertyCell::cast(*type_info)->value()->IsSmi()) { - JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(*type_info); - Smi* smi = Smi::cast(cell->value()); - ElementsKind to_kind = static_cast(smi->value()); - if (holey && !IsFastHoleyElementsKind(to_kind)) { - to_kind = GetHoleyElementsKind(to_kind); - // Update the allocation site info to reflect the advice alteration. 
- cell->set_value(Smi::FromInt(to_kind)); - } - - maybe_array = isolate->heap()->AllocateJSObjectWithAllocationSite( - *constructor, type_info); - if (!maybe_array->To(&array)) return maybe_array; - } else { - ElementsKind kind = constructor->initial_map()->elements_kind(); - ASSERT(kind == GetInitialFastElementsKind()); - maybe_array = isolate->heap()->AllocateJSObject(*constructor); - if (!maybe_array->To(&array)) return maybe_array; - // We might need to transition to holey - if (holey) { - kind = GetHoleyElementsKind(kind); - maybe_array = array->TransitionElementsKind(kind); - if (maybe_array->IsFailure()) return maybe_array; - } - } - - maybe_array = isolate->heap()->AllocateJSArrayStorage(array, 0, 0, - DONT_INITIALIZE_ARRAY_ELEMENTS); - if (maybe_array->IsFailure()) return maybe_array; - maybe_array = ArrayConstructInitializeElements(array, caller_args); - if (maybe_array->IsFailure()) return maybe_array; - return array; -} - - static MaybeObject* ArrayCodeGenericCommon(Arguments* args, Isolate* isolate, JSFunction* constructor) { @@ -563,7 +505,7 @@ BUILTIN(ArrayPush) { } // Add the provided values. - AssertNoAllocation no_gc; + DisallowHeapAllocation no_gc; WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); for (int index = 0; index < to_add; index++) { elms->set(index + len, args[index + 1], mode); @@ -612,7 +554,7 @@ BUILTIN(ArrayPush) { } // Add the provided values. - AssertNoAllocation no_gc; + DisallowHeapAllocation no_gc; int index; for (index = 0; index < to_add; index++) { Object* arg = args[index + 1]; @@ -695,7 +637,7 @@ BUILTIN(ArrayShift) { // Shift the elements. if (elms_obj->IsFixedArray()) { FixedArray* elms = FixedArray::cast(elms_obj); - AssertNoAllocation no_gc; + DisallowHeapAllocation no_gc; heap->MoveElements(elms, 0, 1, len - 1); elms->set(len - 1, heap->the_hole_value()); } else { @@ -762,12 +704,12 @@ BUILTIN(ArrayUnshift) { elms = new_elms; array->set_elements(elms); } else { - AssertNoAllocation no_gc; + DisallowHeapAllocation no_gc; heap->MoveElements(elms, to_add, 0, len); } // Add the provided values. 
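The builtins.cc hunks here rename AssertNoAllocation to DisallowHeapAllocation at every site that holds raw element pointers across a region of code. A minimal sketch of the RAII scope-guard idiom behind it (hypothetical guard name and flag, not the V8 implementation):

#include <cassert>
#include <cstddef>
#include <new>

struct HeapState {
  static bool allocation_disallowed;
};
bool HeapState::allocation_disallowed = false;

class DisallowHeapAllocationGuard {  // hypothetical stand-in
 public:
  DisallowHeapAllocationGuard() : previous_(HeapState::allocation_disallowed) {
    HeapState::allocation_disallowed = true;
  }
  ~DisallowHeapAllocationGuard() {
    HeapState::allocation_disallowed = previous_;  // restore on scope exit
  }
 private:
  bool previous_;
};

void* Allocate(size_t bytes) {
  assert(!HeapState::allocation_disallowed);  // fires inside guarded regions
  return ::operator new(bytes);
}

int main() {
  void* p = Allocate(16);  // allowed here
  ::operator delete(p);
  {
    DisallowHeapAllocationGuard no_gc;
    // Raw pointers into the heap stay valid in this scope: any call to
    // Allocate() would trip the assert instead of moving objects.
  }
  return 0;
}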
- AssertNoAllocation no_gc; + DisallowHeapAllocation no_gc; WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); for (int i = 0; i < to_add; i++) { elms->set(i, args[i + 1], mode); @@ -898,7 +840,7 @@ BUILTIN(ArraySlice) { result_len, result_len); - AssertNoAllocation no_gc; + DisallowHeapAllocation no_gc; if (result_len == 0) return maybe_array; if (!maybe_array->To(&result_array)) return maybe_array; @@ -1000,7 +942,7 @@ BUILTIN(ArraySplice) { if (!maybe_array->To(&result_array)) return maybe_array; if (actual_delete_count > 0) { - AssertNoAllocation no_gc; + DisallowHeapAllocation no_gc; ElementsAccessor* accessor = array->GetElementsAccessor(); MaybeObject* maybe_failure = accessor->CopyElements( NULL, actual_start, elements_kind, result_array->elements(), @@ -1025,7 +967,7 @@ BUILTIN(ArraySplice) { MoveDoubleElements(elms, delta, elms, 0, actual_start); } else { FixedArray* elms = FixedArray::cast(elms_obj); - AssertNoAllocation no_gc; + DisallowHeapAllocation no_gc; heap->MoveElements(elms, delta, 0, actual_start); } @@ -1041,7 +983,7 @@ BUILTIN(ArraySplice) { FillWithHoles(elms, new_length, len); } else { FixedArray* elms = FixedArray::cast(elms_obj); - AssertNoAllocation no_gc; + DisallowHeapAllocation no_gc; heap->MoveElements(elms, actual_start + item_count, actual_start + actual_delete_count, (len - actual_delete_count - actual_start)); @@ -1062,7 +1004,7 @@ BUILTIN(ArraySplice) { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity); if (!maybe_obj->To(&new_elms)) return maybe_obj; - AssertNoAllocation no_gc; + DisallowHeapAllocation no_gc; ElementsKind kind = array->GetElementsKind(); ElementsAccessor* accessor = array->GetElementsAccessor(); @@ -1083,7 +1025,7 @@ BUILTIN(ArraySplice) { elms_obj = new_elms; elms_changed = true; } else { - AssertNoAllocation no_gc; + DisallowHeapAllocation no_gc; heap->MoveElements(elms, actual_start + item_count, actual_start + actual_delete_count, (len - actual_delete_count - actual_start)); @@ -1102,7 +1044,7 @@ BUILTIN(ArraySplice) { } } else { FixedArray* elms = FixedArray::cast(elms_obj); - AssertNoAllocation no_gc; + DisallowHeapAllocation no_gc; WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); for (int k = actual_start; k < actual_start + item_count; k++) { elms->set(k, args[3 + k - actual_start], mode); @@ -1466,6 +1408,11 @@ static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) { } +static void Generate_LoadIC_Slow(MacroAssembler* masm) { + LoadIC::GenerateRuntimeGetProperty(masm); +} + + static void Generate_KeyedLoadIC_Initialize(MacroAssembler* masm) { KeyedLoadIC::GenerateInitialize(masm); } diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h index 58d1a8b..c45fbfd 100644 --- a/deps/v8/src/builtins.h +++ b/deps/v8/src/builtins.h @@ -144,6 +144,8 @@ enum BuiltinExtraArguments { Code::kNoExtraICState) \ V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \ Code::kNoExtraICState) \ + V(LoadIC_Slow, LOAD_IC, GENERIC, \ + Code::kNoExtraICState) \ \ V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \ Code::kNoExtraICState) \ diff --git a/deps/v8/src/checks.cc b/deps/v8/src/checks.cc index 8bcde1c..8208682 100644 --- a/deps/v8/src/checks.cc +++ b/deps/v8/src/checks.cc @@ -36,6 +36,8 @@ static int fatal_error_handler_nesting_depth = 0; // Contains protection against recursive calls (faults while handling faults). extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) 
{ + i::AllowHandleDereference allow_deref; + i::AllowDeferredHandleDereference allow_deferred_deref; fflush(stdout); fflush(stderr); fatal_error_handler_nesting_depth++; diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc index 6e837dd..99c4db5 100644 --- a/deps/v8/src/code-stubs-hydrogen.cc +++ b/deps/v8/src/code-stubs-hydrogen.cc @@ -36,10 +36,9 @@ namespace internal { static LChunk* OptimizeGraph(HGraph* graph) { - Isolate* isolate = graph->isolate(); - AssertNoAllocation no_gc; - NoHandleAllocation no_handles(isolate); - HandleDereferenceGuard no_deref(isolate, HandleDereferenceGuard::DISALLOW); + DisallowHeapAllocation no_allocation; + DisallowHandleAllocation no_handles; + DisallowHandleDereference no_deref; ASSERT(graph != NULL); SmartArrayPointer bailout_reason; @@ -100,7 +99,23 @@ class CodeStubGraphBuilderBase : public HGraphBuilder { IfBuilder checker_; }; + enum ArgumentClass { + NONE, + SINGLE, + MULTIPLE + }; + + HValue* BuildArrayConstructor(ElementsKind kind, + bool disable_allocation_sites, + ArgumentClass argument_class); + HValue* BuildInternalArrayConstructor(ElementsKind kind, + ArgumentClass argument_class); + private: + HValue* BuildArraySingleArgumentConstructor(JSArrayBuilder* builder); + HValue* BuildArrayNArgumentsConstructor(JSArrayBuilder* builder, + ElementsKind kind); + SmartArrayPointer parameters_; HValue* arguments_length_; CompilationInfoWithZone info_; @@ -148,7 +163,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() { HParameter::REGISTER_PARAMETER, Representation::Integer32()); stack_parameter_count->set_type(HType::Smi()); - // it's essential to bind this value to the environment in case of deopt + // It's essential to bind this value to the environment in case of deopt. AddInstruction(stack_parameter_count); start_environment->Bind(param_count, stack_parameter_count); arguments_length_ = stack_parameter_count; @@ -169,7 +184,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() { HValue* return_value = BuildCodeStub(); // We might have extra expressions to pop from the stack in addition to the - // arguments above + // arguments above. HInstruction* stack_pop_count = stack_parameter_count; if (descriptor_->function_mode_ == JS_FUNCTION_STUB_MODE) { if (!stack_parameter_count->IsConstant() && @@ -186,11 +201,12 @@ bool CodeStubGraphBuilderBase::BuildGraph() { } } - if (!current_block()->IsFinished()) { + if (current_block() != NULL) { HReturn* hreturn_instruction = new(zone) HReturn(return_value, context_, stack_pop_count); current_block()->Finish(hreturn_instruction); + set_current_block(NULL); } return true; } @@ -204,10 +220,10 @@ class CodeStubGraphBuilder: public CodeStubGraphBuilderBase { protected: virtual HValue* BuildCodeStub() { - if (casted_stub()->IsMiss()) { - return BuildCodeInitializedStub(); - } else { + if (casted_stub()->IsUninitialized()) { return BuildCodeUninitializedStub(); + } else { + return BuildCodeInitializedStub(); } } @@ -276,16 +292,17 @@ static Handle DoGenerateCode(Stub* stub) { if (descriptor->register_param_count_ < 0) { stub->InitializeInterfaceDescriptor(isolate, descriptor); } - // The miss case without stack parameters can use a light-weight stub to enter + + // If we are uninitialized we can use a light-weight stub to enter // the runtime that is significantly faster than using the standard // stub-failure deopt mechanism. 
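The BuildGraph change above finishes the final block with an HReturn and then calls set_current_block(NULL), guarding the emission with `current_block() != NULL` rather than `!IsFinished()`. A sketch of the invariant being enforced (stand-in types, not Hydrogen's):

#include <cassert>
#include <cstddef>

struct BasicBlock {
  bool finished = false;
  void Finish() { finished = true; }
};

class GraphBuilder {
 public:
  explicit GraphBuilder(BasicBlock* entry) : current_(entry) {}
  void Append() { assert(current_ != NULL && !current_->finished); }
  void Return() {
    current_->Finish();
    current_ = NULL;  // never leave a finished block as the current one
  }
  BasicBlock* current_block() const { return current_; }
 private:
  BasicBlock* current_;
};

int main() {
  BasicBlock entry;
  GraphBuilder builder(&entry);
  builder.Append();
  builder.Return();
  // Later emission is guarded exactly like the patched BuildGraph:
  if (builder.current_block() != NULL) builder.Append();
  return 0;
}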
- if (stub->IsMiss() && descriptor->stack_parameter_count_ == NULL) { + if (stub->IsUninitialized() && descriptor->has_miss_handler()) { + ASSERT(descriptor->stack_parameter_count_ == NULL); return stub->GenerateLightweightMissCode(isolate); - } else { - CodeStubGraphBuilder builder(stub); - LChunk* chunk = OptimizeGraph(builder.CreateGraph()); - return chunk->Codegen(); } + CodeStubGraphBuilder builder(stub); + LChunk* chunk = OptimizeGraph(builder.CreateGraph()); + return chunk->Codegen(); } @@ -358,7 +375,6 @@ Handle FastCloneShallowArrayStub::GenerateCode() { template <> HValue* CodeStubGraphBuilder::BuildCodeStub() { Zone* zone = this->zone(); - Factory* factory = isolate()->factory(); HValue* undefined = graph()->GetConstantUndefined(); HInstruction* boilerplate = @@ -383,24 +399,17 @@ HValue* CodeStubGraphBuilder::BuildCodeStub() { HValue* size_in_bytes = AddInstruction(new(zone) HConstant(size, Representation::Integer32())); HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE; - if (FLAG_pretenure_literals) { + if (isolate()->heap()->ShouldGloballyPretenure()) { flags = static_cast( flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE); } - HInstruction* object = - AddInstruction(new(zone) HAllocate(context(), - size_in_bytes, - HType::JSObject(), - flags)); + + HInstruction* object = AddInstruction(new(zone) + HAllocate(context(), size_in_bytes, HType::JSObject(), flags)); for (int i = 0; i < size; i += kPointerSize) { - HInstruction* value = - AddInstruction(new(zone) HLoadNamedField( - boilerplate, true, Representation::Tagged(), i)); - AddInstruction(new(zone) HStoreNamedField(object, - factory->empty_string(), - value, true, - Representation::Tagged(), i)); + HObjectAccess access = HObjectAccess::ForJSObjectOffset(i); + AddStore(object, access, AddLoad(boilerplate, access)); } checker.ElseDeopt(); @@ -418,7 +427,7 @@ HValue* CodeStubGraphBuilder::BuildCodeStub() { HInstruction* load = BuildUncheckedMonomorphicElementAccess( GetParameter(0), GetParameter(1), NULL, NULL, casted_stub()->is_js_array(), casted_stub()->elements_kind(), - false, NEVER_RETURN_HOLE, STANDARD_STORE, Representation::Tagged()); + false, NEVER_RETURN_HOLE, STANDARD_STORE); return load; } @@ -430,11 +439,11 @@ Handle KeyedLoadFastElementStub::GenerateCode() { template<> HValue* CodeStubGraphBuilder::BuildCodeStub() { - Representation representation = casted_stub()->representation(); - HInstruction* load = AddInstruction(DoBuildLoadNamedField( - GetParameter(0), casted_stub()->is_inobject(), - representation, casted_stub()->offset())); - return load; + HObjectAccess access = casted_stub()->is_inobject() ? + HObjectAccess::ForJSObjectOffset(casted_stub()->offset()) : + HObjectAccess::ForBackingStoreOffset(casted_stub()->offset()); + return AddInstruction(BuildLoadNamedField(GetParameter(0), access, + casted_stub()->representation())); } @@ -445,11 +454,11 @@ Handle LoadFieldStub::GenerateCode() { template<> HValue* CodeStubGraphBuilder::BuildCodeStub() { - Representation representation = casted_stub()->representation(); - HInstruction* load = AddInstruction(DoBuildLoadNamedField( - GetParameter(0), casted_stub()->is_inobject(), - representation, casted_stub()->offset())); - return load; + HObjectAccess access = casted_stub()->is_inobject() ? 
+ HObjectAccess::ForJSObjectOffset(casted_stub()->offset()) : + HObjectAccess::ForBackingStoreOffset(casted_stub()->offset()); + return AddInstruction(BuildLoadNamedField(GetParameter(0), access, + casted_stub()->representation())); } @@ -463,8 +472,7 @@ HValue* CodeStubGraphBuilder::BuildCodeStub() { BuildUncheckedMonomorphicElementAccess( GetParameter(0), GetParameter(1), GetParameter(2), NULL, casted_stub()->is_js_array(), casted_stub()->elements_kind(), - true, NEVER_RETURN_HOLE, casted_stub()->store_mode(), - Representation::Tagged()); + true, NEVER_RETURN_HOLE, casted_stub()->store_mode()); return GetParameter(2); } @@ -487,8 +495,8 @@ HValue* CodeStubGraphBuilder::BuildCodeStub() { AddInstruction(new(zone) HTrapAllocationMemento(js_array)); HInstruction* array_length = - AddInstruction(HLoadNamedField::NewArrayLength( - zone, js_array, js_array, HType::Smi())); + AddLoad(js_array, HObjectAccess::ForArrayLength()); + array_length->set_type(HType::Smi()); ElementsKind to_kind = casted_stub()->to_kind(); BuildNewSpaceArrayCheck(array_length, to_kind); @@ -507,27 +515,19 @@ HValue* CodeStubGraphBuilder::BuildCodeStub() { HInstruction* elements_length = AddInstruction(new(zone) HFixedArrayBaseLength(elements)); - HValue* new_elements = - BuildAllocateAndInitializeElements(context(), to_kind, elements_length); + HValue* new_elements = BuildAllocateElementsAndInitializeElementsHeader( + context(), to_kind, elements_length); BuildCopyElements(context(), elements, casted_stub()->from_kind(), new_elements, to_kind, array_length, elements_length); - Factory* factory = isolate()->factory(); - - AddInstruction(new(zone) HStoreNamedField(js_array, - factory->elements_field_string(), - new_elements, true, - Representation::Tagged(), - JSArray::kElementsOffset)); + AddStore(js_array, HObjectAccess::ForElementsPointer(), new_elements); if_builder.End(); - AddInstruction(new(zone) HStoreNamedField(js_array, factory->length_string(), - map, true, - Representation::Tagged(), - JSArray::kMapOffset)); + AddStore(js_array, HObjectAccess::ForMap(), map); + return js_array; } @@ -536,40 +536,56 @@ Handle TransitionElementsKindStub::GenerateCode() { return DoGenerateCode(this); } - -template <> -HValue* CodeStubGraphBuilder::BuildCodeStub() { - // ----------- S t a t e ------------- - // -- Parameter 1 : type info cell - // -- Parameter 0 : constructor - // ----------------------------------- +HValue* CodeStubGraphBuilderBase::BuildArrayConstructor( + ElementsKind kind, bool disable_allocation_sites, + ArgumentClass argument_class) { + HValue* constructor = GetParameter(ArrayConstructorStubBase::kConstructor); + HValue* property_cell = GetParameter(ArrayConstructorStubBase::kPropertyCell); HInstruction* array_function = BuildGetArrayFunction(context()); - ArrayContextChecker(this, - GetParameter(ArrayConstructorStubBase::kConstructor), - array_function); - // Get the right map - // Should be a constant - JSArrayBuilder array_builder( - this, - casted_stub()->elements_kind(), - GetParameter(ArrayConstructorStubBase::kPropertyCell), - casted_stub()->mode()); - return array_builder.AllocateEmptyArray(); + + ArrayContextChecker(this, constructor, array_function); + JSArrayBuilder array_builder(this, kind, property_cell, + disable_allocation_sites); + HValue* result = NULL; + switch (argument_class) { + case NONE: + result = array_builder.AllocateEmptyArray(); + break; + case SINGLE: + result = BuildArraySingleArgumentConstructor(&array_builder); + break; + case MULTIPLE: + result = 
BuildArrayNArgumentsConstructor(&array_builder, kind); + break; + } + return result; } -Handle ArrayNoArgumentConstructorStub::GenerateCode() { - return DoGenerateCode(this); +HValue* CodeStubGraphBuilderBase::BuildInternalArrayConstructor( + ElementsKind kind, ArgumentClass argument_class) { + HValue* constructor = GetParameter( + InternalArrayConstructorStubBase::kConstructor); + JSArrayBuilder array_builder(this, kind, constructor); + + HValue* result = NULL; + switch (argument_class) { + case NONE: + result = array_builder.AllocateEmptyArray(); + break; + case SINGLE: + result = BuildArraySingleArgumentConstructor(&array_builder); + break; + case MULTIPLE: + result = BuildArrayNArgumentsConstructor(&array_builder, kind); + break; + } + return result; } -template <> -HValue* CodeStubGraphBuilder:: - BuildCodeStub() { - HInstruction* array_function = BuildGetArrayFunction(context()); - ArrayContextChecker(this, - GetParameter(ArrayConstructorStubBase::kConstructor), - array_function); +HValue* CodeStubGraphBuilderBase::BuildArraySingleArgumentConstructor( + JSArrayBuilder* array_builder) { // Smi check and range check on the input arg. HValue* constant_one = graph()->GetConstant1(); HValue* constant_zero = graph()->GetConstant0(); @@ -580,19 +596,13 @@ HValue* CodeStubGraphBuilder:: new(zone()) HAccessArgumentsAt(elements, constant_one, constant_zero)); HConstant* max_alloc_length = - new(zone()) HConstant(JSObject::kInitialMaxFastElementArray, - Representation::Tagged()); + new(zone()) HConstant(JSObject::kInitialMaxFastElementArray); AddInstruction(max_alloc_length); const int initial_capacity = JSArray::kPreallocatedArrayElements; - HConstant* initial_capacity_node = - new(zone()) HConstant(initial_capacity, Representation::Tagged()); + HConstant* initial_capacity_node = new(zone()) HConstant(initial_capacity); AddInstruction(initial_capacity_node); - // Since we're forcing Integer32 representation for this HBoundsCheck, - // there's no need to Smi-check the index. 
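The two helpers above collapse what were three near-identical templated BuildCodeStub bodies into one builder parameterized by the ArgumentClass enum. A compact sketch of that deduplication shape (return strings stand in for the real HValue graphs):

#include <string>

enum ArgumentClass { NONE, SINGLE, MULTIPLE };

static std::string BuildArray(ArgumentClass argument_class) {
  // Shared setup (constructor check, JSArrayBuilder) lives here once.
  switch (argument_class) {
    case NONE:     return "allocate empty array";
    case SINGLE:   return "length/capacity from the one argument";
    case MULTIPLE: return "copy the argument vector into elements";
  }
  return "";  // unreachable; keeps compilers satisfied
}

int main() {
  // Each former per-stub body reduces to a one-line delegation:
  return BuildArray(NONE) == "allocate empty array" ? 0 : 1;
}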
- HBoundsCheck* checked_arg = AddBoundsCheck(argument, max_alloc_length, - ALLOW_SMI_KEY, - Representation::Tagged()); + HBoundsCheck* checked_arg = AddBoundsCheck(argument, max_alloc_length); IfBuilder if_builder(this); if_builder.IfCompare(checked_arg, constant_zero, Token::EQ); if_builder.Then(); @@ -606,46 +616,23 @@ HValue* CodeStubGraphBuilder:: // Figure out total size HValue* length = Pop(); HValue* capacity = Pop(); - - JSArrayBuilder array_builder( - this, - casted_stub()->elements_kind(), - GetParameter(ArrayConstructorStubBase::kPropertyCell), - casted_stub()->mode()); - return array_builder.AllocateArray(capacity, length, true); -} - - -Handle ArraySingleArgumentConstructorStub::GenerateCode() { - return DoGenerateCode(this); + return array_builder->AllocateArray(capacity, length, true); } -template <> -HValue* CodeStubGraphBuilder::BuildCodeStub() { - HInstruction* array_function = BuildGetArrayFunction(context()); - ArrayContextChecker(this, - GetParameter(ArrayConstructorStubBase::kConstructor), - array_function); - ElementsKind kind = casted_stub()->elements_kind(); - HValue* length = GetArgumentsLength(); - - JSArrayBuilder array_builder( - this, - kind, - GetParameter(ArrayConstructorStubBase::kPropertyCell), - casted_stub()->mode()); - +HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor( + JSArrayBuilder* array_builder, ElementsKind kind) { // We need to fill with the hole if it's a smi array in the multi-argument // case because we might have to bail out while copying arguments into // the array because they aren't compatible with a smi array. // If it's a double array, no problem, and if it's fast then no // problem either because doubles are boxed. + HValue* length = GetArgumentsLength(); bool fill_with_hole = IsFastSmiElementsKind(kind); - HValue* new_object = array_builder.AllocateArray(length, - length, - fill_with_hole); - HValue* elements = array_builder.GetElementsLocation(); + HValue* new_object = array_builder->AllocateArray(length, + length, + fill_with_hole); + HValue* elements = array_builder->GetElementsLocation(); ASSERT(elements != NULL); // Now populate the elements correctly. 
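The single-argument path above bounds-checks the argument against JSObject::kInitialMaxFastElementArray, then uses a preallocated capacity when the argument is zero and the argument itself for both length and capacity otherwise. A sketch of that size selection (constant values are illustrative stand-ins):

#include <cassert>
#include <utility>

static const int kPreallocated = 4;          // JSArray::kPreallocatedArrayElements stand-in
static const int kMaxFastElements = 100000;  // JSObject::kInitialMaxFastElementArray stand-in

// Returns {capacity, length}; the real stub deopts rather than asserts.
static std::pair<int, int> SizeFor(int argument) {
  assert(argument >= 0 && argument < kMaxFastElements);  // checked_arg
  if (argument == 0) return std::make_pair(kPreallocated, 0);
  return std::make_pair(argument, argument);
}

int main() {
  assert(SizeFor(0).first == kPreallocated && SizeFor(0).second == 0);
  assert(SizeFor(7).first == 7 && SizeFor(7).second == 7);  // new Array(7)
  return 0;
}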
@@ -659,39 +646,108 @@ HValue* CodeStubGraphBuilder::BuildCodeStub() { HInstruction* argument = AddInstruction(new(zone()) HAccessArgumentsAt( argument_elements, length, key)); - // Checks to prevent incompatible stores - if (IsFastSmiElementsKind(kind)) { - AddInstruction(new(zone()) HCheckSmi(argument)); - } - AddInstruction(new(zone()) HStoreKeyed(elements, key, argument, kind)); builder.EndBody(); return new_object; } +template <> +HValue* CodeStubGraphBuilder::BuildCodeStub() { + ElementsKind kind = casted_stub()->elements_kind(); + bool disable_allocation_sites = casted_stub()->disable_allocation_sites(); + return BuildArrayConstructor(kind, disable_allocation_sites, NONE); +} + + +Handle ArrayNoArgumentConstructorStub::GenerateCode() { + return DoGenerateCode(this); +} + + +template <> +HValue* CodeStubGraphBuilder:: + BuildCodeStub() { + ElementsKind kind = casted_stub()->elements_kind(); + bool disable_allocation_sites = casted_stub()->disable_allocation_sites(); + return BuildArrayConstructor(kind, disable_allocation_sites, SINGLE); +} + + +Handle ArraySingleArgumentConstructorStub::GenerateCode() { + return DoGenerateCode(this); +} + + +template <> +HValue* CodeStubGraphBuilder::BuildCodeStub() { + ElementsKind kind = casted_stub()->elements_kind(); + bool disable_allocation_sites = casted_stub()->disable_allocation_sites(); + return BuildArrayConstructor(kind, disable_allocation_sites, MULTIPLE); +} + + Handle ArrayNArgumentsConstructorStub::GenerateCode() { return DoGenerateCode(this); } template <> -HValue* CodeStubGraphBuilder::BuildCodeUninitializedStub() { +HValue* CodeStubGraphBuilder:: + BuildCodeStub() { + ElementsKind kind = casted_stub()->elements_kind(); + return BuildInternalArrayConstructor(kind, NONE); +} + + +Handle InternalArrayNoArgumentConstructorStub::GenerateCode() { + return DoGenerateCode(this); +} + + +template <> +HValue* CodeStubGraphBuilder:: + BuildCodeStub() { + ElementsKind kind = casted_stub()->elements_kind(); + return BuildInternalArrayConstructor(kind, SINGLE); +} + + +Handle InternalArraySingleArgumentConstructorStub::GenerateCode() { + return DoGenerateCode(this); +} + + +template <> +HValue* CodeStubGraphBuilder:: + BuildCodeStub() { + ElementsKind kind = casted_stub()->elements_kind(); + return BuildInternalArrayConstructor(kind, MULTIPLE); +} + + +Handle InternalArrayNArgumentsConstructorStub::GenerateCode() { + return DoGenerateCode(this); +} + + +template <> +HValue* CodeStubGraphBuilder::BuildCodeInitializedStub() { CompareNilICStub* stub = casted_stub(); HIfContinuation continuation; Handle sentinel_map(graph()->isolate()->heap()->meta_map()); - BuildCompareNil(GetParameter(0), stub->GetKind(), + BuildCompareNil(GetParameter(0), stub->GetTypes(), sentinel_map, RelocInfo::kNoPosition, &continuation); IfBuilder if_nil(this, &continuation); if_nil.Then(); if (continuation.IsFalseReachable()) { if_nil.Else(); - if_nil.Return(graph()->GetConstantSmi0()); + if_nil.Return(graph()->GetConstant0()); } if_nil.End(); return continuation.IsTrueReachable() - ? graph()->GetConstantSmi1() + ? 
graph()->GetConstant1() : graph()->GetConstantUndefined(); } @@ -700,4 +756,24 @@ Handle CompareNilICStub::GenerateCode() { return DoGenerateCode(this); } + +template <> +HValue* CodeStubGraphBuilder::BuildCodeInitializedStub() { + ToBooleanStub* stub = casted_stub(); + + IfBuilder if_true(this); + if_true.If(GetParameter(0), stub->GetTypes()); + if_true.Then(); + if_true.Return(graph()->GetConstant1()); + if_true.Else(); + if_true.End(); + return graph()->GetConstant0(); +} + + +Handle ToBooleanStub::GenerateCode() { + return DoGenerateCode(this); +} + + } } // namespace v8::internal diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index 312febc..6b6e250 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -45,7 +45,8 @@ CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor() function_mode_(NOT_JS_FUNCTION_STUB_MODE), register_params_(NULL), deoptimization_handler_(NULL), - miss_handler_(IC_Utility(IC::kUnreachable), Isolate::Current()) { } + miss_handler_(IC_Utility(IC::kUnreachable), Isolate::Current()), + has_miss_handler_(false) { } bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) { @@ -304,6 +305,27 @@ void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) { } +InlineCacheState ICCompareStub::GetICState() { + CompareIC::State state = Max(left_, right_); + switch (state) { + case CompareIC::UNINITIALIZED: + return ::v8::internal::UNINITIALIZED; + case CompareIC::SMI: + case CompareIC::NUMBER: + case CompareIC::INTERNALIZED_STRING: + case CompareIC::STRING: + case CompareIC::UNIQUE_NAME: + case CompareIC::OBJECT: + case CompareIC::KNOWN_OBJECT: + return MONOMORPHIC; + case CompareIC::GENERIC: + return ::v8::internal::GENERIC; + } + UNREACHABLE(); + return ::v8::internal::UNINITIALIZED; +} + + void ICCompareStub::AddToSpecialCache(Handle new_object) { ASSERT(*known_map_ != NULL); Isolate* isolate = new_object->GetIsolate(); @@ -410,36 +432,44 @@ void ICCompareStub::Generate(MacroAssembler* masm) { void CompareNilICStub::Record(Handle object) { ASSERT(types_ != Types::FullCompare()); - if (equality_kind_ == kStrictEquality) { - // When testing for strict equality only one value will evaluate to true - types_.RemoveAll(); - types_.Add((nil_value_ == kNullValue) ? 
NULL_TYPE: - UNDEFINED); + if (object->IsNull()) { + types_.Add(NULL_TYPE); + } else if (object->IsUndefined()) { + types_.Add(UNDEFINED); + } else if (object->IsUndetectableObject() || + object->IsOddball() || + !object->IsHeapObject()) { + types_ = Types::FullCompare(); + } else if (IsMonomorphic()) { + types_ = Types::FullCompare(); } else { - if (object->IsNull()) { - types_.Add(NULL_TYPE); - } else if (object->IsUndefined()) { - types_.Add(UNDEFINED); - } else if (object->IsUndetectableObject() || - object->IsOddball() || - !object->IsHeapObject()) { - types_ = Types::FullCompare(); - } else if (IsMonomorphic()) { - types_ = Types::FullCompare(); - } else { - types_.Add(MONOMORPHIC_MAP); - } + types_.Add(MONOMORPHIC_MAP); } } +void CompareNilICStub::Types::TraceTransition(Types to) const { + #ifdef DEBUG + if (!FLAG_trace_ic) return; + char buffer[100]; + NoAllocationStringAllocator allocator(buffer, + static_cast(sizeof(buffer))); + StringStream stream(&allocator); + stream.Add("[CompareNilIC : "); + Print(&stream); + stream.Add("=>"); + to.Print(&stream); + stream.Add("]\n"); + stream.OutputToStdOut(); + #endif +} + + void CompareNilICStub::PrintName(StringStream* stream) { stream->Add("CompareNilICStub_"); types_.Print(stream); stream->Add((nil_value_ == kNullValue) ? "(NullValue|": "(UndefinedValue|"); - stream->Add((equality_kind_ == kStrictEquality) ? "StrictEquality)": - "NonStrictEquality)"); } @@ -554,6 +584,14 @@ void CallConstructStub::PrintName(StringStream* stream) { } +bool ToBooleanStub::Record(Handle object) { + Types old_types(types_); + bool to_boolean_value = types_.Record(object); + old_types.TraceTransition(types_); + return to_boolean_value; +} + + void ToBooleanStub::PrintName(StringStream* stream) { stream->Add("ToBooleanStub_"); types_.Print(stream); @@ -577,17 +615,19 @@ void ToBooleanStub::Types::Print(StringStream* stream) const { void ToBooleanStub::Types::TraceTransition(Types to) const { + #ifdef DEBUG if (!FLAG_trace_ic) return; char buffer[100]; NoAllocationStringAllocator allocator(buffer, static_cast(sizeof(buffer))); StringStream stream(&allocator); - stream.Add("[ToBooleanIC ("); + stream.Add("[ToBooleanIC : "); Print(&stream); - stream.Add("->"); + stream.Add("=>"); to.Print(&stream); - stream.Add(")]\n"); + stream.Add("]\n"); stream.OutputToStdOut(); + #endif } @@ -749,4 +789,19 @@ ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate, } +void InternalArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) { + InternalArrayNoArgumentConstructorStub stub1(FAST_ELEMENTS); + InstallDescriptor(isolate, &stub1); + InternalArraySingleArgumentConstructorStub stub2(FAST_ELEMENTS); + InstallDescriptor(isolate, &stub2); + InternalArrayNArgumentsConstructorStub stub3(FAST_ELEMENTS); + InstallDescriptor(isolate, &stub3); +} + +InternalArrayConstructorStub::InternalArrayConstructorStub( + Isolate* isolate) { + InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); +} + + } } // namespace v8::internal diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index aa6a410..0ea7ac9 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -77,6 +77,9 @@ namespace internal { V(ArrayNoArgumentConstructor) \ V(ArraySingleArgumentConstructor) \ V(ArrayNArgumentsConstructor) \ + V(InternalArrayNoArgumentConstructor) \ + V(InternalArraySingleArgumentConstructor) \ + V(InternalArrayNArgumentsConstructor) \ V(KeyedStoreElement) \ V(DebuggerStatement) \ V(NameDictionaryLookup) \ @@ -85,6 +88,7 @@ namespace internal { 
V(StoreArrayLiteralElement) \ V(StubFailureTrampoline) \ V(ArrayConstructor) \ + V(InternalArrayConstructor) \ V(ProfileEntryHook) \ /* IC Handler stubs */ \ V(LoadField) \ @@ -277,7 +281,6 @@ struct CodeStubInterfaceDescriptor { StubFunctionMode function_mode_; Register* register_params_; Address deoptimization_handler_; - ExternalReference miss_handler_; int environment_length() const { if (stack_parameter_count_ != NULL) { @@ -287,6 +290,24 @@ struct CodeStubInterfaceDescriptor { } bool initialized() const { return register_param_count_ >= 0; } + + void SetMissHandler(ExternalReference handler) { + miss_handler_ = handler; + has_miss_handler_ = true; + } + + ExternalReference miss_handler() { + ASSERT(has_miss_handler_); + return miss_handler_; + } + + bool has_miss_handler() { + return has_miss_handler_; + } + + private: + ExternalReference miss_handler_; + bool has_miss_handler_; }; // A helper to make up for the fact that type Register is not fully @@ -300,12 +321,12 @@ struct CodeStubInterfaceDescriptor { class HydrogenCodeStub : public CodeStub { public: enum InitializationState { - CODE_STUB_IS_NOT_MISS, - CODE_STUB_IS_MISS + UNINITIALIZED, + INITIALIZED }; - explicit HydrogenCodeStub(InitializationState state) { - is_miss_ = (state == CODE_STUB_IS_MISS); + explicit HydrogenCodeStub(InitializationState state = INITIALIZED) { + is_uninitialized_ = (state == UNINITIALIZED); } virtual Code::Kind GetCodeKind() const { return Code::STUB; } @@ -314,7 +335,7 @@ class HydrogenCodeStub : public CodeStub { return isolate->code_stub_interface_descriptor(MajorKey()); } - bool IsMiss() { return is_miss_; } + bool IsUninitialized() { return is_uninitialized_; } template static Handle GetUninitialized(Isolate* isolate) { @@ -339,11 +360,11 @@ class HydrogenCodeStub : public CodeStub { void GenerateLightweightMiss(MacroAssembler* masm); virtual int MinorKey() { - return IsMissBits::encode(is_miss_) | + return IsMissBits::encode(is_uninitialized_) | MinorKeyBits::encode(NotMissMinorKey()); } - bool is_miss_; + bool is_uninitialized_; }; @@ -516,8 +537,7 @@ class FastCloneShallowArrayStub : public HydrogenCodeStub { FastCloneShallowArrayStub(Mode mode, AllocationSiteMode allocation_site_mode, int length) - : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS), - mode_(mode), + : mode_(mode), allocation_site_mode_(allocation_site_mode), length_((mode == COPY_ON_WRITE_ELEMENTS) ? 
0 : length) { ASSERT_GE(length_, 0); @@ -577,8 +597,7 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub { static const int kMaximumClonedProperties = 6; explicit FastCloneShallowObjectStub(int length) - : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS), - length_(length) { + : length_(length) { ASSERT_GE(length_, 0); ASSERT_LE(length_, kMaximumClonedProperties); } @@ -655,9 +674,23 @@ class ArrayConstructorStub: public PlatformCodeStub { }; +class InternalArrayConstructorStub: public PlatformCodeStub { + public: + explicit InternalArrayConstructorStub(Isolate* isolate); + + void Generate(MacroAssembler* masm); + + private: + virtual CodeStub::Major MajorKey() { return InternalArrayConstructor; } + virtual int MinorKey() { return 0; } + + void GenerateCase(MacroAssembler* masm, ElementsKind kind); +}; + + class MathPowStub: public PlatformCodeStub { public: - enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK}; + enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK }; explicit MathPowStub(ExponentType exponent_type) : exponent_type_(exponent_type) { } @@ -763,7 +796,7 @@ class HICStub: public HydrogenCodeStub { virtual InlineCacheState GetICState() { return MONOMORPHIC; } protected: - HICStub() : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) { } + HICStub() { } class KindBits: public BitField {}; virtual Code::Kind kind() const = 0; }; @@ -870,7 +903,9 @@ class BinaryOpStub: public PlatformCodeStub { platform_specific_bit_(false), left_type_(BinaryOpIC::UNINITIALIZED), right_type_(BinaryOpIC::UNINITIALIZED), - result_type_(BinaryOpIC::UNINITIALIZED) { + result_type_(BinaryOpIC::UNINITIALIZED), + has_fixed_right_arg_(false), + encoded_right_arg_(encode_arg_value(1)) { Initialize(); ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); } @@ -879,13 +914,17 @@ class BinaryOpStub: public PlatformCodeStub { int key, BinaryOpIC::TypeInfo left_type, BinaryOpIC::TypeInfo right_type, - BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED) + BinaryOpIC::TypeInfo result_type, + bool has_fixed_right_arg, + int32_t fixed_right_arg_value) : op_(OpBits::decode(key)), mode_(ModeBits::decode(key)), platform_specific_bit_(PlatformSpecificBits::decode(key)), left_type_(left_type), right_type_(right_type), - result_type_(result_type) { } + result_type_(result_type), + has_fixed_right_arg_(has_fixed_right_arg), + encoded_right_arg_(encode_arg_value(fixed_right_arg_value)) { } static void decode_types_from_minor_key(int minor_key, BinaryOpIC::TypeInfo* left_type, @@ -903,6 +942,24 @@ class BinaryOpStub: public PlatformCodeStub { return static_cast(OpBits::decode(minor_key)); } + static bool decode_has_fixed_right_arg_from_minor_key(int minor_key) { + return HasFixedRightArgBits::decode(minor_key); + } + + static int decode_fixed_right_arg_value_from_minor_key(int minor_key) { + return decode_arg_value(FixedRightArgValueBits::decode(minor_key)); + } + + int fixed_right_arg_value() const { + return decode_arg_value(encoded_right_arg_); + } + + static bool can_encode_arg_value(int32_t value) { + return value > 0 && + IsPowerOf2(value) && + FixedRightArgValueBits::is_valid(WhichPowerOf2(value)); + } + enum SmiCodeGenerateHeapNumberResults { ALLOW_HEAPNUMBER_RESULTS, NO_HEAPNUMBER_RESULTS @@ -918,15 +975,31 @@ class BinaryOpStub: public PlatformCodeStub { BinaryOpIC::TypeInfo right_type_; BinaryOpIC::TypeInfo result_type_; + bool has_fixed_right_arg_; + int encoded_right_arg_; + + static int encode_arg_value(int32_t value) { + ASSERT(can_encode_arg_value(value)); + return WhichPowerOf2(value); + } + + static int32_t 
decode_arg_value(int value) { + return 1 << value; + } + virtual void PrintName(StringStream* stream); - // Minor key encoding in 19 bits TTTRRRLLLSOOOOOOOMM. + // Minor key encoding in all 25 bits FFFFFHTTTRRRLLLPOOOOOOOMM. + // Note: We actually do not need 7 bits for the operation, just 4 bits to + // encode ADD, SUB, MUL, DIV, MOD, BIT_OR, BIT_AND, BIT_XOR, SAR, SHL, SHR. class ModeBits: public BitField {}; class OpBits: public BitField {}; class PlatformSpecificBits: public BitField {}; class LeftTypeBits: public BitField {}; class RightTypeBits: public BitField {}; class ResultTypeBits: public BitField {}; + class HasFixedRightArgBits: public BitField {}; + class FixedRightArgValueBits: public BitField {}; Major MajorKey() { return BinaryOp; } int MinorKey() { @@ -935,7 +1008,9 @@ class BinaryOpStub: public PlatformCodeStub { | PlatformSpecificBits::encode(platform_specific_bit_) | LeftTypeBits::encode(left_type_) | RightTypeBits::encode(right_type_) - | ResultTypeBits::encode(result_type_); + | ResultTypeBits::encode(result_type_) + | HasFixedRightArgBits::encode(has_fixed_right_arg_) + | FixedRightArgValueBits::encode(encoded_right_arg_); } @@ -1005,6 +1080,8 @@ class ICCompareStub: public PlatformCodeStub { return static_cast(HandlerStateField::decode(minor_key)); } + virtual InlineCacheState GetICState(); + private: class OpField: public BitField { }; class LeftStateField: public BitField { }; @@ -1069,6 +1146,7 @@ class CompareNilICStub : public HydrogenCodeStub { } void Print(StringStream* stream) const; + void TraceTransition(Types to) const; }; // At most 6 different types can be distinguished, because the Code object @@ -1076,23 +1154,21 @@ class CompareNilICStub : public HydrogenCodeStub { // boolean flags we need to store. :-P STATIC_ASSERT(NUMBER_OF_TYPES <= 6); - CompareNilICStub(EqualityKind kind, NilValue nil, Types types) - : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS), types_(types) { - equality_kind_ = kind; + CompareNilICStub(NilValue nil, Types types = Types()) + : types_(types) { nil_value_ = nil; } - explicit CompareNilICStub(Code::ExtraICState ic_state) - : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) { - equality_kind_ = EqualityKindField::decode(ic_state); + CompareNilICStub(Code::ExtraICState ic_state, + InitializationState init_state = INITIALIZED) + : HydrogenCodeStub(init_state) { nil_value_ = NilValueField::decode(ic_state); types_ = Types(ExtractTypesFromExtraICState(ic_state)); } static Handle GetUninitialized(Isolate* isolate, - EqualityKind kind, NilValue nil) { - return CompareNilICStub(kind, nil, CODE_STUB_IS_MISS).GetCode(isolate); + return CompareNilICStub(nil, UNINITIALIZED).GetCode(isolate); } virtual void InitializeInterfaceDescriptor( @@ -1100,8 +1176,7 @@ class CompareNilICStub : public HydrogenCodeStub { CodeStubInterfaceDescriptor* descriptor); static void InitializeForIsolate(Isolate* isolate) { - CompareNilICStub compare_stub(kStrictEquality, kNullValue, - CODE_STUB_IS_MISS); + CompareNilICStub compare_stub(kNullValue, UNINITIALIZED); compare_stub.InitializeInterfaceDescriptor( isolate, isolate->code_stub_interface_descriptor(CodeStub::CompareNilIC)); @@ -1121,53 +1196,38 @@ class CompareNilICStub : public HydrogenCodeStub { Handle GenerateCode(); - // extra ic state = nil_value | equality_kind | type_n-1 | ... | type_0 + // extra ic state = nil_value | type_n-1 | ... 
@@ -1121,53 +1196,38 @@
 
   Handle<Code> GenerateCode();
 
-  // extra ic state = nil_value | equality_kind | type_n-1 | ... | type_0
+  // extra ic state = nil_value | type_n-1 | ... | type_0
   virtual Code::ExtraICState GetExtraICState() {
     return NilValueField::encode(nil_value_) |
-           EqualityKindField::encode(equality_kind_) |
            types_.ToIntegral();
   }
 
   static byte ExtractTypesFromExtraICState(
       Code::ExtraICState state) {
-    return state & ((1<<NUMBER_OF_TYPES)-1);
+    return state & ((1 << NUMBER_OF_TYPES) - 1);
   }
 
   void Record(Handle<Object> object);
 
   bool IsMonomorphic() const { return types_.Contains(MONOMORPHIC_MAP); }
-  EqualityKind GetKind() const { return equality_kind_; }
   NilValue GetNilValue() const { return nil_value_; }
   Types GetTypes() const { return types_; }
   void ClearTypes() { types_.RemoveAll(); }
-  void SetKind(EqualityKind kind) { equality_kind_ = kind; }
 
   virtual void PrintName(StringStream* stream);
 
  private:
   friend class CompareNilIC;
 
-  CompareNilICStub(EqualityKind kind, NilValue nil,
-                   InitializationState init_state)
-      : HydrogenCodeStub(init_state), types_(0) {
-    equality_kind_ = kind;
-    nil_value_ = nil;
-  }
-
-  CompareNilICStub(Code::ExtraICState ic_state, InitializationState init_state)
+  CompareNilICStub(NilValue nil, InitializationState init_state)
       : HydrogenCodeStub(init_state) {
-    equality_kind_ = EqualityKindField::decode(ic_state);
-    nil_value_ = NilValueField::decode(ic_state);
-    types_ = Types(ExtractTypesFromExtraICState(ic_state));
+    nil_value_ = nil;
   }
 
-  class EqualityKindField : public BitField<EqualityKind, NUMBER_OF_TYPES, 1> {
-  };
-  class NilValueField : public BitField<NilValue, NUMBER_OF_TYPES + 1, 1> {};
+  class NilValueField : public BitField<NilValue, NUMBER_OF_TYPES, 1> {};
 
   virtual CodeStub::Major MajorKey() { return CompareNilIC; }
   virtual int NotMissMinorKey() { return GetExtraICState(); }
 
-  EqualityKind equality_kind_;
   NilValue nil_value_;
   Types types_;
@@ -1567,8 +1627,7 @@ class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
 
 class KeyedLoadFastElementStub : public HydrogenCodeStub {
  public:
-  KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind)
-      : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
+  KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) {
     bit_field_ = ElementsKindBits::encode(elements_kind) |
         IsJSArrayBits::encode(is_js_array);
   }
@@ -1603,8 +1662,7 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub {
  public:
   KeyedStoreFastElementStub(bool is_js_array,
                             ElementsKind elements_kind,
-                            KeyedAccessStoreMode mode)
-      : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
+                            KeyedAccessStoreMode mode) {
     bit_field_ = ElementsKindBits::encode(elements_kind) |
         IsJSArrayBits::encode(is_js_array) |
         StoreModeBits::encode(mode);
@@ -1644,8 +1702,7 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub {
 class TransitionElementsKindStub : public HydrogenCodeStub {
  public:
   TransitionElementsKindStub(ElementsKind from_kind,
-                             ElementsKind to_kind)
-      : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
+                             ElementsKind to_kind) {
     bit_field_ = FromKindBits::encode(from_kind) |
         ToKindBits::encode(to_kind);
   }
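
CompareNilICStub above (and ToBooleanStub further down) persist what they have observed as a small set of type bits in the code object's extra IC state, which is why NUMBER_OF_TYPES must stay within the available byte. A hand-rolled sketch of that set pattern — the class and enumerator names here are illustrative stand-ins, not V8's EnumSet:

    #include <cassert>

    enum NilCompareType {  // illustrative names; the stub defines its own
      OBSERVED_UNDEFINED, OBSERVED_NULL, MONOMORPHIC_MAP,
      OBSERVED_UNDETECTABLE, OBSERVED_GENERIC,
      NUMBER_OF_TYPES
    };

    class TypeSet {
     public:
      TypeSet() : bits_(0) {}
      void Add(NilCompareType t) { bits_ |= 1 << t; }
      bool Contains(NilCompareType t) const { return (bits_ & (1 << t)) != 0; }
      bool IsEmpty() const { return bits_ == 0; }
      unsigned char ToIntegral() const { return bits_; }  // <= 6 types fit
     private:
      unsigned char bits_;
    };

    int main() {
      TypeSet types;               // freshly compiled stub: empty set
      types.Add(MONOMORPHIC_MAP);  // Record() observes a single map
      assert(types.Contains(MONOMORPHIC_MAP) && !types.IsEmpty());
      // Extra IC state = flag fields (e.g. the nil value) in the high bits,
      // with types.ToIntegral() occupying the low NUMBER_OF_TYPES bits.
      return 0;
    }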
@@ -1678,20 +1735,22 @@
 
 class ArrayConstructorStubBase : public HydrogenCodeStub {
  public:
-  ArrayConstructorStubBase(ElementsKind kind, AllocationSiteMode mode)
-      : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
+  ArrayConstructorStubBase(ElementsKind kind, bool disable_allocation_sites) {
+    // It only makes sense to override local allocation site behavior
+    // if there is a difference between the global allocation site policy
+    // for an ElementsKind and the desired usage of the stub.
+    ASSERT(!disable_allocation_sites ||
+           AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE);
     bit_field_ = ElementsKindBits::encode(kind) |
-        AllocationSiteModeBits::encode(mode == TRACK_ALLOCATION_SITE);
+        DisableAllocationSitesBits::encode(disable_allocation_sites);
   }
 
   ElementsKind elements_kind() const {
     return ElementsKindBits::decode(bit_field_);
   }
 
-  AllocationSiteMode mode() const {
-    return AllocationSiteModeBits::decode(bit_field_)
-        ? TRACK_ALLOCATION_SITE
-        : DONT_TRACK_ALLOCATION_SITE;
+  bool disable_allocation_sites() const {
+    return DisableAllocationSitesBits::decode(bit_field_);
   }
 
   virtual bool IsPregenerated() { return true; }
@@ -1706,7 +1765,7 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
   int NotMissMinorKey() { return bit_field_; }
 
   class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
-  class AllocationSiteModeBits: public BitField<bool, 8, 1> {};
+  class DisableAllocationSitesBits: public BitField<bool, 8, 1> {};
 
   uint32_t bit_field_;
   DISALLOW_COPY_AND_ASSIGN(ArrayConstructorStubBase);
@@ -1717,8 +1776,8 @@ class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase {
  public:
   ArrayNoArgumentConstructorStub(
       ElementsKind kind,
-      AllocationSiteMode mode = TRACK_ALLOCATION_SITE)
-      : ArrayConstructorStubBase(kind, mode) {
+      bool disable_allocation_sites = false)
+      : ArrayConstructorStubBase(kind, disable_allocation_sites) {
   }
 
   virtual Handle<Code> GenerateCode();
@@ -1738,8 +1797,8 @@ class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
  public:
   ArraySingleArgumentConstructorStub(
       ElementsKind kind,
-      AllocationSiteMode mode = TRACK_ALLOCATION_SITE)
-      : ArrayConstructorStubBase(kind, mode) {
+      bool disable_allocation_sites = false)
+      : ArrayConstructorStubBase(kind, disable_allocation_sites) {
   }
 
   virtual Handle<Code> GenerateCode();
@@ -1759,8 +1818,8 @@ class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
  public:
   ArrayNArgumentsConstructorStub(
       ElementsKind kind,
-      AllocationSiteMode mode = TRACK_ALLOCATION_SITE) :
-      ArrayConstructorStubBase(kind, mode) {
+      bool disable_allocation_sites = false)
+      : ArrayConstructorStubBase(kind, disable_allocation_sites) {
   }
 
   virtual Handle<Code> GenerateCode();
@@ -1776,6 +1835,87 @@ class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
 };
 
 
+class InternalArrayConstructorStubBase : public HydrogenCodeStub {
+ public:
+  explicit InternalArrayConstructorStubBase(ElementsKind kind) {
+    kind_ = kind;
+  }
+
+  virtual bool IsPregenerated() { return true; }
+  static void GenerateStubsAheadOfTime(Isolate* isolate);
+  static void InstallDescriptors(Isolate* isolate);
+
+  // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+  static const int kConstructor = 0;
+
+  ElementsKind elements_kind() const { return kind_; }
+
+ private:
+  int NotMissMinorKey() { return kind_; }
+
+  ElementsKind kind_;
+
+  DISALLOW_COPY_AND_ASSIGN(InternalArrayConstructorStubBase);
+};
+
+
+class InternalArrayNoArgumentConstructorStub : public
+    InternalArrayConstructorStubBase {
+ public:
+  explicit InternalArrayNoArgumentConstructorStub(ElementsKind kind)
+      : InternalArrayConstructorStubBase(kind) { }
+
+  virtual Handle<Code> GenerateCode();
+
+  virtual void InitializeInterfaceDescriptor(
+      Isolate* isolate,
+      CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+  Major MajorKey() { return InternalArrayNoArgumentConstructor; }
+
+  DISALLOW_COPY_AND_ASSIGN(InternalArrayNoArgumentConstructorStub);
+};
+
+
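
The boolean that replaces AllocationSiteMode in these constructors is an opt-out: the ASSERT in ArrayConstructorStubBase above only permits it when the kind's global policy would otherwise track allocation sites. The same guard as a standalone predicate — the GetMode policy below is invented purely for illustration and is not V8's actual table:

    #include <cassert>

    enum AllocationSiteMode { DONT_TRACK_ALLOCATION_SITE, TRACK_ALLOCATION_SITE };
    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

    // Stand-in for AllocationSiteInfo::GetMode(): pretend only the terminal
    // kind skips tracking (hypothetical policy for the sake of the example).
    static AllocationSiteMode GetMode(ElementsKind kind) {
      return kind == FAST_ELEMENTS ? DONT_TRACK_ALLOCATION_SITE
                                   : TRACK_ALLOCATION_SITE;
    }

    static bool CanDisableAllocationSites(ElementsKind kind, bool disable) {
      // Opting out is only meaningful when there is something to opt out of.
      return !disable || GetMode(kind) == TRACK_ALLOCATION_SITE;
    }

    int main() {
      assert(CanDisableAllocationSites(FAST_SMI_ELEMENTS, true));
      assert(!CanDisableAllocationSites(FAST_ELEMENTS, true));  // would ASSERT
      assert(CanDisableAllocationSites(FAST_ELEMENTS, false));
      return 0;
    }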
+class InternalArraySingleArgumentConstructorStub : public
+    InternalArrayConstructorStubBase {
+ public:
+  explicit InternalArraySingleArgumentConstructorStub(ElementsKind kind)
+      : InternalArrayConstructorStubBase(kind) { }
+
+  virtual Handle<Code> GenerateCode();
+
+  virtual void InitializeInterfaceDescriptor(
+      Isolate* isolate,
+      CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+  Major MajorKey() { return InternalArraySingleArgumentConstructor; }
+
+  DISALLOW_COPY_AND_ASSIGN(InternalArraySingleArgumentConstructorStub);
+};
+
+
+class InternalArrayNArgumentsConstructorStub : public
+    InternalArrayConstructorStubBase {
+ public:
+  explicit InternalArrayNArgumentsConstructorStub(ElementsKind kind)
+      : InternalArrayConstructorStubBase(kind) { }
+
+  virtual Handle<Code> GenerateCode();
+
+  virtual void InitializeInterfaceDescriptor(
+      Isolate* isolate,
+      CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+  Major MajorKey() { return InternalArrayNArgumentsConstructor; }
+
+  DISALLOW_COPY_AND_ASSIGN(InternalArrayNArgumentsConstructorStub);
+};
+
+
 class KeyedStoreElementStub : public PlatformCodeStub {
  public:
   KeyedStoreElementStub(bool is_js_array,
@@ -1811,7 +1951,7 @@ class KeyedStoreElementStub : public PlatformCodeStub {
 };
 
 
-class ToBooleanStub: public PlatformCodeStub {
+class ToBooleanStub: public HydrogenCodeStub {
  public:
   enum Type {
     UNDEFINED,
@@ -1845,31 +1985,54 @@ class ToBooleanStub: public PlatformCodeStub {
   static Types no_types() { return Types(); }
   static Types all_types() { return Types((1 << NUMBER_OF_TYPES) - 1); }
 
-  explicit ToBooleanStub(Register tos, Types types = Types())
-      : tos_(tos), types_(types) { }
+  explicit ToBooleanStub(Types types = Types())
+      : types_(types) { }
+
+  explicit ToBooleanStub(Code::ExtraICState state)
+      : types_(static_cast<byte>(state)) { }
+
+  bool Record(Handle<Object> object);
+  Types GetTypes() { return types_; }
+
+  virtual Handle<Code> GenerateCode();
+  virtual void InitializeInterfaceDescriptor(
+      Isolate* isolate,
+      CodeStubInterfaceDescriptor* descriptor);
 
-  void Generate(MacroAssembler* masm);
   virtual Code::Kind GetCodeKind() const { return Code::TO_BOOLEAN_IC; }
   virtual void PrintName(StringStream* stream);
   virtual bool SometimesSetsUpAFrame() { return false; }
 
- private:
-  Major MajorKey() { return ToBoolean; }
-  int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) |
-                          types_.ToByte(); }
+  static void InitializeForIsolate(Isolate* isolate) {
+    ToBooleanStub stub;
+    stub.InitializeInterfaceDescriptor(
+        isolate,
+        isolate->code_stub_interface_descriptor(CodeStub::ToBoolean));
+  }
 
-  virtual void FinishCode(Handle<Code> code) {
-    code->set_to_boolean_state(types_.ToByte());
+  static Handle<Code> GetUninitialized(Isolate* isolate) {
+    return ToBooleanStub(UNINITIALIZED).GetCode(isolate);
   }
 
-  void CheckOddball(MacroAssembler* masm,
-                    Type type,
-                    Heap::RootListIndex value,
-                    bool result);
-  void GenerateTypeTransition(MacroAssembler* masm);
+  virtual Code::ExtraICState GetExtraICState() {
+    return types_.ToIntegral();
+  }
+
+  virtual InlineCacheState GetICState() {
+    if (types_.IsEmpty()) {
+      return ::v8::internal::UNINITIALIZED;
+    } else {
+      return MONOMORPHIC;
+    }
+  }
+
+ private:
+  Major MajorKey() { return ToBoolean; }
+  int NotMissMinorKey() { return GetExtraICState(); }
+
+  explicit ToBooleanStub(InitializationState init_state) :
+      HydrogenCodeStub(init_state) {}
 
-  Register tos_;
   Types types_;
 };
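
With ToBooleanStub now a HydrogenCodeStub, its IC state is derived entirely from the recorded type set: an empty set means nothing has been observed yet (UNINITIALIZED), anything else reports MONOMORPHIC, mirroring GetICState() above. A reduced sketch of that state function — the scaffolding types here are stand-ins, only the decision logic comes from the diff:

    #include <cassert>

    enum InlineCacheState { UNINITIALIZED, MONOMORPHIC };

    struct ToBooleanTypes {
      unsigned char bits;
      bool IsEmpty() const { return bits == 0; }
    };

    static InlineCacheState GetICState(const ToBooleanTypes& types) {
      // Same shape as ToBooleanStub::GetICState() in the hunk above.
      return types.IsEmpty() ? UNINITIALIZED : MONOMORPHIC;
    }

    int main() {
      ToBooleanTypes types = { 0 };
      assert(GetICState(types) == UNINITIALIZED);  // GetUninitialized() stubs
      types.bits |= 1;                             // Record() observed a type
      assert(GetICState(types) == MONOMORPHIC);
      return 0;
    }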
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index ff4003c..af2f1f6 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -58,13 +58,12 @@ Comment::~Comment() {
 
 #undef __
 
-void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
-#ifdef DEBUG
+void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
   bool print_source = false;
   bool print_ast = false;
   const char* ftype;
 
-  if (Isolate::Current()->bootstrapper()->IsActive()) {
+  if (info->isolate()->bootstrapper()->IsActive()) {
     print_source = FLAG_print_builtin_source;
     print_ast = FLAG_print_builtin_ast;
     ftype = "builtin";
@@ -75,17 +74,18 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
   }
 
   if (FLAG_trace_codegen || print_source || print_ast) {
-    PrintF("*** Generate code for %s function: ", ftype);
+    PrintF("[generating %s code for %s function: ", kind, ftype);
     if (info->IsStub()) {
       const char* name =
           CodeStub::MajorName(info->code_stub()->MajorKey(), true);
       PrintF("%s", name == NULL ? "<unknown>" : name);
     } else {
-      info->function()->name()->ShortPrint();
+      PrintF("%s", *info->function()->debug_name()->ToCString());
     }
-    PrintF(" ***\n");
+    PrintF("]\n");
   }
 
+#ifdef DEBUG
   if (!info->IsStub() && print_source) {
     PrintF("--- Source from AST ---\n%s\n",
            PrettyPrinter().PrintProgram(info->function()));
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 09907c4..53ff2e1 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -113,18 +113,6 @@ class ElementsTransitionGenerator : public AllStatic {
 };
 
 
-class SeqStringSetCharGenerator : public AllStatic {
- public:
-  static void Generate(MacroAssembler* masm,
-                       String::Encoding encoding,
-                       Register string,
-                       Register index,
-                       Register value);
- private:
-  DISALLOW_COPY_AND_ASSIGN(SeqStringSetCharGenerator);
-};
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_CODEGEN_H_
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 5045758..5fc107f 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -36,6 +36,7 @@
 #include "deoptimizer.h"
 #include "full-codegen.h"
 #include "gdb-jit.h"
+#include "typing.h"
 #include "hydrogen.h"
 #include "isolate-inl.h"
 #include "lithium.h"
@@ -361,11 +362,11 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
     PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
     isolate()->GetHTracer()->TraceCompilation(info());
   }
-  Handle<Context> native_context(
-      info()->closure()->context()->native_context());
-  oracle_ = new(info()->zone()) TypeFeedbackOracle(
-      code, native_context, isolate(), info()->zone());
-  graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info(), oracle_);
+
+  // Type-check the function.
+  AstTyper::Type(info());
+
+  graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info());
 
   Timer t(this, &time_taken_to_create_graph_);
   graph_ = graph_builder_->CreateGraph();
@@ -392,9 +393,9 @@
 }
 
 OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
-  AssertNoAllocation no_gc;
-  NoHandleAllocation no_handles(isolate());
-  HandleDereferenceGuard no_deref(isolate(), HandleDereferenceGuard::DISALLOW);
+  DisallowHeapAllocation no_allocation;
+  DisallowHandleAllocation no_handles;
+  DisallowHandleDereference no_deref;
 
   ASSERT(last_status() == SUCCEEDED);
   Timer t(this, &time_taken_to_optimize_);
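
The renamed guards (DisallowHeapAllocation and friends) are per-thread RAII scopes: constructing one enters a region in which the named operation must not happen, and debug checks assert against a counter until the scope is destroyed. A minimal single-operation version of the pattern — thread_local is a simplification here; V8 threads its counters through the isolate:

    #include <cassert>

    // Non-zero means "inside at least one disallow scope" on this thread.
    static thread_local int g_no_allocation_depth = 0;

    class DisallowHeapAllocationScope {
     public:
      DisallowHeapAllocationScope()  { ++g_no_allocation_depth; }
      ~DisallowHeapAllocationScope() { --g_no_allocation_depth; }
    };

    static void Allocate() {
      // Every allocation site checks the counter, as V8's debug asserts do.
      assert(g_no_allocation_depth == 0 && "heap allocation disallowed here");
    }

    int main() {
      Allocate();  // fine
      {
        DisallowHeapAllocationScope no_allocation;  // like OptimizeGraph()
        // Allocate();  // would trip the assert inside this scope
      }
      Allocate();  // fine again
      return 0;
    }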
@@ -423,8 +424,7 @@
   // graph creation.  To make sure that we don't encounter inconsistencies
   // between graph creation and code generation, we disallow accessing
   // objects through deferred handles during the latter, with exceptions.
-  HandleDereferenceGuard no_deref_deferred(
-      isolate(), HandleDereferenceGuard::DISALLOW_DEFERRED);
+  DisallowDeferredHandleDereference no_deferred_handle_deref;
   Handle<Code> optimized_code = chunk_->Codegen();
   if (optimized_code.is_null()) {
     info()->set_bailout_reason("code generation failed");
@@ -649,7 +649,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
   // in that case too.
 
   // Create a script object describing the script to be compiled.
-  Handle