From 073947c150316cfc0bd440851e590663c3b67814 Mon Sep 17 00:00:00 2001 From: Ryan Dahl Date: Wed, 10 Mar 2010 10:50:46 -0800 Subject: [PATCH] Upgrade V8 to 2.1.3 --- deps/v8/ChangeLog | 19 + deps/v8/SConstruct | 40 +- deps/v8/include/v8.h | 36 +- deps/v8/src/SConscript | 2 +- deps/v8/src/accessors.cc | 1 - deps/v8/src/api.cc | 62 +- deps/v8/src/arm/assembler-arm.cc | 94 -- deps/v8/src/arm/assembler-arm.h | 172 +- deps/v8/src/arm/builtins-arm.cc | 4 +- deps/v8/src/arm/codegen-arm.cc | 347 +++- deps/v8/src/arm/codegen-arm.h | 43 + deps/v8/src/arm/fast-codegen-arm.cc | 4 +- deps/v8/src/arm/full-codegen-arm.cc | 1 + deps/v8/src/arm/ic-arm.cc | 21 +- deps/v8/src/arm/macro-assembler-arm.cc | 64 +- deps/v8/src/arm/macro-assembler-arm.h | 28 +- deps/v8/src/arm/regexp-macro-assembler-arm.cc | 4 +- deps/v8/src/arm/stub-cache-arm.cc | 12 +- deps/v8/src/arm/virtual-frame-arm.cc | 21 +- deps/v8/src/arm/virtual-frame-arm.h | 6 +- deps/v8/src/array.js | 2 + deps/v8/src/assembler.cc | 10 + deps/v8/src/assembler.h | 3 +- deps/v8/src/ast.cc | 5 +- deps/v8/src/ast.h | 61 +- deps/v8/src/bootstrapper.cc | 13 + deps/v8/src/builtins.cc | 376 +++-- deps/v8/src/code-stubs.cc | 15 +- deps/v8/src/code-stubs.h | 11 + deps/v8/src/codegen.cc | 5 + deps/v8/src/compilation-cache.cc | 70 +- deps/v8/src/compiler.cc | 38 +- deps/v8/src/compiler.h | 7 + deps/v8/src/contexts.h | 2 + deps/v8/src/conversions-inl.h | 26 + deps/v8/src/conversions.h | 3 +- deps/v8/src/data-flow.cc | 906 ++++++++++ deps/v8/src/data-flow.h | 391 +++++ deps/v8/src/date-delay.js | 29 +- deps/v8/src/debug-delay.js | 50 +- deps/v8/src/debug.cc | 54 +- deps/v8/src/debug.h | 7 +- deps/v8/src/factory.h | 3 +- deps/v8/src/fast-codegen.h | 1 + deps/v8/src/flag-definitions.h | 9 +- deps/v8/src/frame-element.cc | 1 + deps/v8/src/frame-element.h | 33 +- deps/v8/src/frames.cc | 1 - deps/v8/src/globals.h | 2 + deps/v8/src/handles.cc | 15 +- deps/v8/src/handles.h | 8 +- deps/v8/src/heap-inl.h | 12 + deps/v8/src/heap-profiler.cc | 1 + deps/v8/src/heap-profiler.h | 2 + deps/v8/src/heap.cc | 119 +- deps/v8/src/heap.h | 63 +- deps/v8/src/ia32/assembler-ia32.cc | 114 ++ deps/v8/src/ia32/assembler-ia32.h | 19 +- deps/v8/src/ia32/builtins-ia32.cc | 95 +- deps/v8/src/ia32/codegen-ia32.cc | 1750 +++++++++++++++----- deps/v8/src/ia32/codegen-ia32.h | 97 +- deps/v8/src/ia32/debug-ia32.cc | 9 +- deps/v8/src/ia32/disasm-ia32.cc | 44 +- deps/v8/src/ia32/fast-codegen-ia32.cc | 1 + deps/v8/src/ia32/full-codegen-ia32.cc | 37 +- deps/v8/src/ia32/ic-ia32.cc | 279 ++-- deps/v8/src/ia32/macro-assembler-ia32.cc | 57 +- deps/v8/src/ia32/macro-assembler-ia32.h | 31 +- deps/v8/src/ia32/regexp-macro-assembler-ia32.cc | 49 +- deps/v8/src/ia32/regexp-macro-assembler-ia32.h | 15 - deps/v8/src/ia32/register-allocator-ia32.cc | 1 + deps/v8/src/ia32/stub-cache-ia32.cc | 19 +- deps/v8/src/ia32/virtual-frame-ia32.cc | 154 +- deps/v8/src/ia32/virtual-frame-ia32.h | 20 +- deps/v8/src/ic.cc | 145 +- deps/v8/src/ic.h | 31 +- deps/v8/src/jsregexp.h | 1 + deps/v8/src/jump-target-inl.h | 2 +- deps/v8/src/jump-target.cc | 13 +- deps/v8/src/jump-target.h | 1 + deps/v8/src/liveedit-delay.js | 426 +++++ deps/v8/src/liveedit.cc | 404 ++++- deps/v8/src/liveedit.h | 28 + deps/v8/src/log.cc | 34 +- deps/v8/src/log.h | 7 +- deps/v8/src/macros.py | 10 + deps/v8/src/math.js | 8 +- deps/v8/src/messages.cc | 1 + deps/v8/src/messages.js | 1 + deps/v8/src/mips/codegen-mips.cc | 45 +- deps/v8/src/mips/codegen-mips.h | 14 +- deps/v8/src/mips/fast-codegen-mips.cc | 20 +- deps/v8/src/mips/full-codegen-mips.cc | 5 + 
deps/v8/src/mips/ic-mips.cc | 25 +- deps/v8/src/mips/jump-target-mips.cc | 1 + deps/v8/src/mips/macro-assembler-mips.cc | 31 +- deps/v8/src/mips/macro-assembler-mips.h | 47 +- deps/v8/src/mips/stub-cache-mips.cc | 31 +- deps/v8/src/mips/virtual-frame-mips.cc | 12 +- deps/v8/src/mips/virtual-frame-mips.h | 19 +- deps/v8/src/number-info.h | 160 +- deps/v8/src/objects-debug.cc | 18 + deps/v8/src/objects-inl.h | 39 +- deps/v8/src/objects.cc | 381 ++++- deps/v8/src/objects.h | 187 ++- deps/v8/src/parser.cc | 43 +- deps/v8/src/platform-freebsd.cc | 6 +- deps/v8/src/platform-linux.cc | 10 +- deps/v8/src/platform-macos.cc | 5 +- deps/v8/src/platform-win32.cc | 5 +- deps/v8/src/prettyprinter.cc | 8 +- deps/v8/src/regexp-delay.js | 39 +- deps/v8/src/register-allocator-inl.h | 39 + deps/v8/src/register-allocator.cc | 21 +- deps/v8/src/register-allocator.h | 21 +- deps/v8/src/rewriter.cc | 58 + deps/v8/src/runtime.cc | 577 ++++++- deps/v8/src/runtime.h | 14 +- deps/v8/src/runtime.js | 2 +- deps/v8/src/scanner.cc | 105 +- deps/v8/src/scanner.h | 67 +- deps/v8/src/scopeinfo.cc | 4 +- deps/v8/src/scopeinfo.h | 1 + deps/v8/src/scopes.cc | 23 +- deps/v8/src/serialize.cc | 42 +- deps/v8/src/spaces.cc | 7 +- deps/v8/src/splay-tree-inl.h | 276 +++ deps/v8/src/splay-tree.h | 191 +++ deps/v8/src/string.js | 12 +- deps/v8/src/top.cc | 1 + deps/v8/src/usage-analyzer.cc | 426 ----- deps/v8/src/utils.h | 60 +- deps/v8/src/v8-counters.h | 19 +- deps/v8/src/v8.h | 2 +- deps/v8/src/variables.cc | 58 +- deps/v8/src/variables.h | 38 +- deps/v8/src/version.cc | 4 +- deps/v8/src/virtual-frame-inl.h | 17 +- deps/v8/src/virtual-frame.cc | 18 +- deps/v8/src/x64/builtins-x64.cc | 4 +- deps/v8/src/x64/codegen-x64.cc | 412 ++++- deps/v8/src/x64/codegen-x64.h | 52 +- deps/v8/src/x64/fast-codegen-x64.cc | 1 + deps/v8/src/x64/full-codegen-x64.cc | 1 + deps/v8/src/x64/ic-x64.cc | 21 +- deps/v8/src/x64/macro-assembler-x64.cc | 63 +- deps/v8/src/x64/macro-assembler-x64.h | 28 +- deps/v8/src/x64/regexp-macro-assembler-x64.cc | 6 +- deps/v8/src/x64/stub-cache-x64.cc | 12 +- deps/v8/src/x64/virtual-frame-x64.cc | 41 +- deps/v8/src/x64/virtual-frame-x64.h | 14 +- deps/v8/src/zone-inl.h | 227 +-- deps/v8/src/zone.cc | 1 + deps/v8/src/zone.h | 96 +- deps/v8/test/cctest/SConscript | 1 + deps/v8/test/cctest/cctest.status | 2 + deps/v8/test/cctest/test-api.cc | 283 +++- deps/v8/test/cctest/test-dataflow.cc | 103 ++ deps/v8/test/cctest/test-debug.cc | 14 +- deps/v8/test/cctest/test-disasm-ia32.cc | 2 + deps/v8/test/cctest/test-heap-profiler.cc | 1 + deps/v8/test/cctest/test-heap.cc | 370 +++-- deps/v8/test/cctest/test-log.cc | 69 +- deps/v8/test/cctest/test-serialize.cc | 28 +- .../array-elements-from-array-prototype-chain.js | 191 +++ .../mjsunit/array-elements-from-array-prototype.js | 191 +++ .../array-elements-from-object-prototype.js | 191 +++ deps/v8/test/mjsunit/array-length.js | 22 +- deps/v8/test/mjsunit/array-slice.js | 11 + deps/v8/test/mjsunit/array-splice.js | 59 +- deps/v8/test/mjsunit/array-unshift.js | 8 + deps/v8/test/mjsunit/bugs/bug-618.js | 45 + deps/v8/test/mjsunit/date.js | 14 + deps/v8/test/mjsunit/debug-liveedit-1.js | 48 + deps/v8/test/mjsunit/debug-liveedit-2.js | 70 + deps/v8/test/mjsunit/debug-liveedit-check-stack.js | 84 + deps/v8/test/mjsunit/debug-scopes.js | 20 +- deps/v8/test/mjsunit/debug-script.js | 2 +- deps/v8/test/mjsunit/fuzz-natives.js | 14 +- deps/v8/test/mjsunit/math-sqrt.js | 44 + deps/v8/test/mjsunit/regress/regress-634.js | 32 + .../mjsunit/regress/regress-636.js} | 22 +- 
deps/v8/test/mjsunit/string-charat.js | 13 + deps/v8/test/mjsunit/string-index.js | 14 + deps/v8/test/mjsunit/string-split-cache.js | 40 + deps/v8/test/mjsunit/undeletable-functions.js | 14 + deps/v8/tools/gyp/v8.gyp | 8 +- deps/v8/tools/visual_studio/js2c.cmd | 2 +- deps/v8/tools/visual_studio/v8.vcproj | 4 + deps/v8/tools/visual_studio/v8_arm.vcproj | 4 + deps/v8/tools/visual_studio/v8_base.vcproj | 8 - deps/v8/tools/visual_studio/v8_base_arm.vcproj | 8 - deps/v8/tools/visual_studio/v8_base_x64.vcproj | 8 - deps/v8/tools/visual_studio/v8_x64.vcproj | 4 + 194 files changed, 10315 insertions(+), 3224 deletions(-) create mode 100644 deps/v8/src/liveedit-delay.js create mode 100644 deps/v8/src/splay-tree-inl.h create mode 100644 deps/v8/src/splay-tree.h delete mode 100644 deps/v8/src/usage-analyzer.cc create mode 100644 deps/v8/test/cctest/test-dataflow.cc create mode 100644 deps/v8/test/mjsunit/array-elements-from-array-prototype-chain.js create mode 100644 deps/v8/test/mjsunit/array-elements-from-array-prototype.js create mode 100644 deps/v8/test/mjsunit/array-elements-from-object-prototype.js create mode 100644 deps/v8/test/mjsunit/bugs/bug-618.js create mode 100644 deps/v8/test/mjsunit/debug-liveedit-1.js create mode 100644 deps/v8/test/mjsunit/debug-liveedit-2.js create mode 100644 deps/v8/test/mjsunit/debug-liveedit-check-stack.js create mode 100644 deps/v8/test/mjsunit/math-sqrt.js create mode 100644 deps/v8/test/mjsunit/regress/regress-634.js rename deps/v8/{src/usage-analyzer.h => test/mjsunit/regress/regress-636.js} (81%) create mode 100644 deps/v8/test/mjsunit/string-split-cache.js diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 339fd18..4363b19 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,21 @@ +2010-03-10: Version 2.1.3 + + Added API method for context-disposal notifications. + + Added API method for accessing elements by integer index. + + Added missing implementation of Uint32::Value and Value::IsUint32 + API methods. + + Added IsExecutionTerminating API method. + + Disabled strict aliasing for GCC 4.4. + + Fixed string-concatenation bug (issue 636). + + Performance improvements on all platforms. + + 2010-02-23: Version 2.1.2 Fix a crash bug caused by wrong assert. @@ -6,6 +24,7 @@ Performance improvements on all platforms. + 2010-02-19: Version 2.1.1 [ES5] Implemented Object.defineProperty. 
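[Editor's note] The ChangeLog entries above correspond to new public API in include/v8.h elsewhere in this patch: element access by integer index (Object::Set/Get overloads), Value::IsUint32 and Uint32::Value, the Handle<T>::As<S>() casting helper, and ContextDisposedNotification now returning a count. Below is a minimal sketch of how an embedder might exercise them against the 2.1-era API; it is illustrative only, not part of the patch, and the helper name UseNewApis is invented for this example.

    // Sketch only: exercises the API additions from V8 2.1.3 (not part of the patch).
    #include <v8.h>

    void UseNewApis(v8::Handle<v8::Object> obj, v8::Persistent<v8::Context>& context) {
      v8::HandleScope scope;

      // New Object::Set/Get overloads taking an integer element index.
      uint32_t index = 0;
      obj->Set(index, v8::Integer::New(42));
      v8::Local<v8::Value> elem = obj->Get(index);

      // Value::IsUint32 and Uint32::Value are implemented in this release.
      if (elem->IsUint32()) {
        uint32_t n = elem->ToUint32()->Value();
        (void) n;
      }

      // New Handle<T>::As<S>() casting helper (shorthand for Local<S>::Cast).
      v8::Local<v8::Integer> as_int = elem.As<v8::Integer>();
      (void) as_int;

      // ContextDisposedNotification now returns the number of context
      // disposals since V8 last had a chance to clean up.
      context.Dispose();
      int disposals = v8::V8::ContextDisposedNotification();
      (void) disposals;
    }
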
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct index 5483663..f7638e0 100644 --- a/deps/v8/SConstruct +++ b/deps/v8/SConstruct @@ -46,8 +46,8 @@ if ANDROID_TOP is None: # on linux we need these compiler flags to avoid crashes in the v8 test suite # and avoid dtoa.c strict aliasing issues if os.environ.get('GCC_VERSION') == '44': - GCC_EXTRA_CCFLAGS = ['-fno-tree-vrp'] - GCC_DTOA_EXTRA_CCFLAGS = ['-fno-strict-aliasing'] + GCC_EXTRA_CCFLAGS = ['-fno-tree-vrp', '-fno-strict-aliasing'] + GCC_DTOA_EXTRA_CCFLAGS = [] else: GCC_EXTRA_CCFLAGS = [] GCC_DTOA_EXTRA_CCFLAGS = [] @@ -255,8 +255,16 @@ LIBRARY_FLAGS = { }, 'msvcltcg:on': { 'CCFLAGS': ['/GL'], - 'LINKFLAGS': ['/LTCG'], 'ARFLAGS': ['/LTCG'], + 'pgo:off': { + 'LINKFLAGS': ['/LTCG'], + }, + 'pgo:instrument': { + 'LINKFLAGS': ['/LTCG:PGI'] + }, + 'pgo:optimize': { + 'LINKFLAGS': ['/LTCG:PGO'] + } } } } @@ -267,6 +275,7 @@ V8_EXTRA_FLAGS = { 'gcc': { 'all': { 'WARNINGFLAGS': ['-Wall', + '-Werror', '-W', '-Wno-unused-parameter', '-Wnon-virtual-dtor'] @@ -526,7 +535,15 @@ SAMPLE_FLAGS = { }, 'msvcltcg:on': { 'CCFLAGS': ['/GL'], - 'LINKFLAGS': ['/LTCG'], + 'pgo:off': { + 'LINKFLAGS': ['/LTCG'], + }, + }, + 'pgo:instrument': { + 'LINKFLAGS': ['/LTCG:PGI'] + }, + 'pgo:optimize': { + 'LINKFLAGS': ['/LTCG:PGO'] } }, 'arch:ia32': { @@ -710,6 +727,11 @@ SIMPLE_OPTIONS = { 'values': ['arm', 'thumb2', 'none'], 'default': 'none', 'help': 'generate thumb2 instructions instead of arm instructions (default)' + }, + 'pgo': { + 'values': ['off', 'instrument', 'optimize'], + 'default': 'off', + 'help': 'select profile guided optimization variant', } } @@ -797,6 +819,8 @@ def VerifyOptions(env): Abort("Shared Object soname not applicable for Windows.") if env['soname'] == 'on' and env['library'] == 'static': Abort("Shared Object soname not applicable for static library.") + if env['os'] != 'win32' and env['pgo'] != 'off': + Abort("Profile guided optimization only supported on Windows.") for (name, option) in SIMPLE_OPTIONS.iteritems(): if (not option.get('default')) and (name not in ARGUMENTS): message = ("A value for option %s must be specified (%s)." 
% @@ -882,7 +906,7 @@ class BuildContext(object): env['ENV'] = self.env_overrides -def PostprocessOptions(options): +def PostprocessOptions(options, os): # Adjust architecture if the simulator option has been set if (options['simulator'] != 'none') and (options['arch'] != options['simulator']): if 'arch' in ARGUMENTS: @@ -893,6 +917,10 @@ def PostprocessOptions(options): # Print a warning if profiling is enabled without profiling support print "Warning: forcing profilingsupport on when prof is on" options['profilingsupport'] = 'on' + if os == 'win32' and options['pgo'] != 'off' and options['msvcltcg'] == 'off': + if 'msvcltcg' in ARGUMENTS: + print "Warning: forcing msvcltcg on as it is required for pgo (%s)" % options['pgo'] + options['msvcltcg'] = 'on' if (options['armvariant'] == 'none' and options['arch'] == 'arm'): options['armvariant'] = 'arm' if (options['armvariant'] != 'none' and options['arch'] != 'arm'): @@ -923,7 +951,7 @@ def BuildSpecific(env, mode, env_overrides): options = {'mode': mode} for option in SIMPLE_OPTIONS: options[option] = env[option] - PostprocessOptions(options) + PostprocessOptions(options, env['os']) context = BuildContext(options, env_overrides, samples=SplitList(env['sample'])) diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 69b93c6..882eedd 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -261,6 +261,10 @@ template class V8EXPORT_INLINE Handle { return Handle(T::Cast(*that)); } + template inline Handle As() { + return Handle::Cast(*this); + } + private: T* val_; }; @@ -295,6 +299,10 @@ template class V8EXPORT_INLINE Local : public Handle { return Local(T::Cast(*that)); } + template inline Local As() { + return Local::Cast(*this); + } + /** Create a local handle for the content of another handle. * The referee is kept alive by the local handle even when * the original handle is destroyed/disposed. @@ -368,6 +376,10 @@ template class V8EXPORT_INLINE Persistent : public Handle { return Persistent(T::Cast(*that)); } + template inline Persistent As() { + return Persistent::Cast(*this); + } + /** * Creates a new persistent handle for an existing local or * persistent handle. @@ -538,13 +550,13 @@ class V8EXPORT Script { * Compiles the specified script (context-independent). * * \param source Script source code. - * \param origin Script origin, owned by caller, no references are kept + * \param origin Script origin, owned by caller, no references are kept * when New() returns * \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile() * using pre_data speeds compilation if it's done multiple times. * Owned by caller, no references are kept when New() returns. * \param script_data Arbitrary data associated with script. Using - * this has same effect as calling SetData(), but allows data to be + * this has same effect as calling SetData(), but allows data to be * available to compile event handlers. * \return Compiled script object (context independent; when run it * will use the currently entered context). @@ -559,7 +571,7 @@ class V8EXPORT Script { * object (typically a string) as the script's origin. * * \param source Script source code. - * \patam file_name file name object (typically a string) to be used + * \param file_name file name object (typically a string) to be used * as the script's origin. * \return Compiled script object (context independent; when run it * will use the currently entered context). @@ -571,7 +583,7 @@ class V8EXPORT Script { * Compiles the specified script (bound to current context). 
* * \param source Script source code. - * \param origin Script origin, owned by caller, no references are kept + * \param origin Script origin, owned by caller, no references are kept * when Compile() returns * \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile() * using pre_data speeds compilation if it's done multiple times. @@ -755,6 +767,11 @@ class V8EXPORT Value : public Data { bool IsInt32() const; /** + * Returns true if this value is a 32-bit signed integer. + */ + bool IsUint32() const; + + /** * Returns true if this value is a Date. */ bool IsDate() const; @@ -1178,6 +1195,9 @@ class V8EXPORT Object : public Value { Handle value, PropertyAttribute attribs = None); + bool Set(uint32_t index, + Handle value); + // Sets a local property on this object bypassing interceptors and // overriding accessors or read-only properties. // @@ -1192,6 +1212,8 @@ class V8EXPORT Object : public Value { Local Get(Handle key); + Local Get(uint32_t index); + // TODO(1245389): Replace the type-specific versions of these // functions with generic ones that accept a Handle key. bool Has(Handle key); @@ -2485,9 +2507,11 @@ class V8EXPORT V8 { /** * Optional notification that a context has been disposed. V8 uses - * these notifications to guide the garbage collection heuristic. + * these notifications to guide the GC heuristic. Returns the number + * of context disposals - including this one - since the last time + * V8 had a chance to clean up. */ - static void ContextDisposedNotification(); + static int ContextDisposedNotification(); private: V8(); diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript index 73de193..d61da3e 100755 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -97,7 +97,6 @@ SOURCES = { token.cc top.cc unicode.cc - usage-analyzer.cc utils.cc v8-counters.cc v8.cc @@ -249,6 +248,7 @@ math.js messages.js apinatives.js debug-delay.js +liveedit-delay.js mirror-delay.js date-delay.js regexp-delay.js diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc index b05719e..e41db94 100644 --- a/deps/v8/src/accessors.cc +++ b/deps/v8/src/accessors.cc @@ -32,7 +32,6 @@ #include "factory.h" #include "scopeinfo.h" #include "top.h" -#include "zone-inl.h" namespace v8 { namespace internal { diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 22d2f4b..93fce79 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -34,9 +34,11 @@ #include "debug.h" #include "execution.h" #include "global-handles.h" +#include "messages.h" #include "platform.h" #include "serialize.h" #include "snapshot.h" +#include "top.h" #include "utils.h" #include "v8threads.h" #include "version.h" @@ -1569,6 +1571,18 @@ bool Value::IsInt32() const { } +bool Value::IsUint32() const { + if (IsDeadCheck("v8::Value::IsUint32()")) return false; + i::Handle obj = Utils::OpenHandle(this); + if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0; + if (obj->IsNumber()) { + double value = obj->Number(); + return i::FastUI2D(i::FastD2UI(value)) == value; + } + return false; +} + + bool Value::IsDate() const { if (IsDeadCheck("v8::Value::IsDate()")) return false; i::Handle obj = Utils::OpenHandle(this); @@ -1974,6 +1988,23 @@ bool v8::Object::Set(v8::Handle key, v8::Handle value, } +bool v8::Object::Set(uint32_t index, v8::Handle value) { + ON_BAILOUT("v8::Object::Set()", return false); + ENTER_V8; + HandleScope scope; + i::Handle self = Utils::OpenHandle(this); + i::Handle value_obj = Utils::OpenHandle(*value); + EXCEPTION_PREAMBLE(); + i::Handle obj = i::SetElement( + self, + 
index, + value_obj); + has_pending_exception = obj.is_null(); + EXCEPTION_BAILOUT_CHECK(false); + return true; +} + + bool v8::Object::ForceSet(v8::Handle key, v8::Handle value, v8::PropertyAttribute attribs) { @@ -2022,6 +2053,18 @@ Local v8::Object::Get(v8::Handle key) { } +Local v8::Object::Get(uint32_t index) { + ON_BAILOUT("v8::Object::Get()", return Local()); + ENTER_V8; + i::Handle self = Utils::OpenHandle(this); + EXCEPTION_PREAMBLE(); + i::Handle result = i::GetElement(self, index); + has_pending_exception = result.is_null(); + EXCEPTION_BAILOUT_CHECK(Local()); + return Utils::ToLocal(result); +} + + Local v8::Object::GetPrototype() { ON_BAILOUT("v8::Object::GetPrototype()", return Local()); ENTER_V8; @@ -2614,7 +2657,7 @@ int String::WriteAscii(char* buffer, int start, int length) const { StringTracker::RecordWrite(str); // Flatten the string for efficiency. This applies whether we are // using StringInputBuffer or Get(i) to access the characters. - str->TryFlattenIfNotFlat(); + str->TryFlatten(); int end = length; if ( (length == -1) || (length > str->length() - start) ) end = str->length() - start; @@ -2727,6 +2770,17 @@ int32_t Int32::Value() const { } +uint32_t Uint32::Value() const { + if (IsDeadCheck("v8::Uint32::Value()")) return 0; + i::Handle obj = Utils::OpenHandle(this); + if (obj->IsSmi()) { + return i::Smi::cast(*obj)->value(); + } else { + return static_cast(obj->Number()); + } +} + + int v8::Object::InternalFieldCount() { if (IsDeadCheck("v8::Object::InternalFieldCount()")) return 0; i::Handle obj = Utils::OpenHandle(this); @@ -2820,9 +2874,9 @@ void v8::V8::LowMemoryNotification() { } -void v8::V8::ContextDisposedNotification() { - if (!i::V8::IsRunning()) return; - i::Heap::NotifyContextDisposed(); +int v8::V8::ContextDisposedNotification() { + if (!i::V8::IsRunning()) return 0; + return i::Heap::NotifyContextDisposed(); } diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index c79aac6..6b226fd 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -81,100 +81,6 @@ void CpuFeatures::Probe() { // ----------------------------------------------------------------------------- -// Implementation of Register and CRegister - -Register no_reg = { -1 }; - -Register r0 = { 0 }; -Register r1 = { 1 }; -Register r2 = { 2 }; -Register r3 = { 3 }; -Register r4 = { 4 }; -Register r5 = { 5 }; -Register r6 = { 6 }; -Register r7 = { 7 }; -Register r8 = { 8 }; // Used as context register. -Register r9 = { 9 }; -Register r10 = { 10 }; // Used as roots register. -Register fp = { 11 }; -Register ip = { 12 }; -Register sp = { 13 }; -Register lr = { 14 }; -Register pc = { 15 }; - - -CRegister no_creg = { -1 }; - -CRegister cr0 = { 0 }; -CRegister cr1 = { 1 }; -CRegister cr2 = { 2 }; -CRegister cr3 = { 3 }; -CRegister cr4 = { 4 }; -CRegister cr5 = { 5 }; -CRegister cr6 = { 6 }; -CRegister cr7 = { 7 }; -CRegister cr8 = { 8 }; -CRegister cr9 = { 9 }; -CRegister cr10 = { 10 }; -CRegister cr11 = { 11 }; -CRegister cr12 = { 12 }; -CRegister cr13 = { 13 }; -CRegister cr14 = { 14 }; -CRegister cr15 = { 15 }; - -// Support for the VFP registers s0 to s31 (d0 to d15). -// Note that "sN:sM" is the same as "dN/2". 
-SwVfpRegister s0 = { 0 }; -SwVfpRegister s1 = { 1 }; -SwVfpRegister s2 = { 2 }; -SwVfpRegister s3 = { 3 }; -SwVfpRegister s4 = { 4 }; -SwVfpRegister s5 = { 5 }; -SwVfpRegister s6 = { 6 }; -SwVfpRegister s7 = { 7 }; -SwVfpRegister s8 = { 8 }; -SwVfpRegister s9 = { 9 }; -SwVfpRegister s10 = { 10 }; -SwVfpRegister s11 = { 11 }; -SwVfpRegister s12 = { 12 }; -SwVfpRegister s13 = { 13 }; -SwVfpRegister s14 = { 14 }; -SwVfpRegister s15 = { 15 }; -SwVfpRegister s16 = { 16 }; -SwVfpRegister s17 = { 17 }; -SwVfpRegister s18 = { 18 }; -SwVfpRegister s19 = { 19 }; -SwVfpRegister s20 = { 20 }; -SwVfpRegister s21 = { 21 }; -SwVfpRegister s22 = { 22 }; -SwVfpRegister s23 = { 23 }; -SwVfpRegister s24 = { 24 }; -SwVfpRegister s25 = { 25 }; -SwVfpRegister s26 = { 26 }; -SwVfpRegister s27 = { 27 }; -SwVfpRegister s28 = { 28 }; -SwVfpRegister s29 = { 29 }; -SwVfpRegister s30 = { 30 }; -SwVfpRegister s31 = { 31 }; - -DwVfpRegister d0 = { 0 }; -DwVfpRegister d1 = { 1 }; -DwVfpRegister d2 = { 2 }; -DwVfpRegister d3 = { 3 }; -DwVfpRegister d4 = { 4 }; -DwVfpRegister d5 = { 5 }; -DwVfpRegister d6 = { 6 }; -DwVfpRegister d7 = { 7 }; -DwVfpRegister d8 = { 8 }; -DwVfpRegister d9 = { 9 }; -DwVfpRegister d10 = { 10 }; -DwVfpRegister d11 = { 11 }; -DwVfpRegister d12 = { 12 }; -DwVfpRegister d13 = { 13 }; -DwVfpRegister d14 = { 14 }; -DwVfpRegister d15 = { 15 }; - -// ----------------------------------------------------------------------------- // Implementation of RelocInfo const int RelocInfo::kApplyMask = 0; diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index f6b7a06..c972c57 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -84,25 +84,24 @@ struct Register { int code_; }; - -extern Register no_reg; -extern Register r0; -extern Register r1; -extern Register r2; -extern Register r3; -extern Register r4; -extern Register r5; -extern Register r6; -extern Register r7; -extern Register r8; -extern Register r9; -extern Register r10; -extern Register fp; -extern Register ip; -extern Register sp; -extern Register lr; -extern Register pc; - +const Register no_reg = { -1 }; + +const Register r0 = { 0 }; +const Register r1 = { 1 }; +const Register r2 = { 2 }; +const Register r3 = { 3 }; +const Register r4 = { 4 }; +const Register r5 = { 5 }; +const Register r6 = { 6 }; +const Register r7 = { 7 }; +const Register r8 = { 8 }; // Used as context register. +const Register r9 = { 9 }; +const Register r10 = { 10 }; // Used as roots register. +const Register fp = { 11 }; +const Register ip = { 12 }; +const Register sp = { 13 }; +const Register lr = { 14 }; +const Register pc = { 15 }; // Single word VFP register. struct SwVfpRegister { @@ -139,57 +138,57 @@ struct DwVfpRegister { }; -// Support for VFP registers s0 to s31 (d0 to d15). +// Support for the VFP registers s0 to s31 (d0 to d15). // Note that "s(N):s(N+1)" is the same as "d(N/2)". 
-extern SwVfpRegister s0; -extern SwVfpRegister s1; -extern SwVfpRegister s2; -extern SwVfpRegister s3; -extern SwVfpRegister s4; -extern SwVfpRegister s5; -extern SwVfpRegister s6; -extern SwVfpRegister s7; -extern SwVfpRegister s8; -extern SwVfpRegister s9; -extern SwVfpRegister s10; -extern SwVfpRegister s11; -extern SwVfpRegister s12; -extern SwVfpRegister s13; -extern SwVfpRegister s14; -extern SwVfpRegister s15; -extern SwVfpRegister s16; -extern SwVfpRegister s17; -extern SwVfpRegister s18; -extern SwVfpRegister s19; -extern SwVfpRegister s20; -extern SwVfpRegister s21; -extern SwVfpRegister s22; -extern SwVfpRegister s23; -extern SwVfpRegister s24; -extern SwVfpRegister s25; -extern SwVfpRegister s26; -extern SwVfpRegister s27; -extern SwVfpRegister s28; -extern SwVfpRegister s29; -extern SwVfpRegister s30; -extern SwVfpRegister s31; - -extern DwVfpRegister d0; -extern DwVfpRegister d1; -extern DwVfpRegister d2; -extern DwVfpRegister d3; -extern DwVfpRegister d4; -extern DwVfpRegister d5; -extern DwVfpRegister d6; -extern DwVfpRegister d7; -extern DwVfpRegister d8; -extern DwVfpRegister d9; -extern DwVfpRegister d10; -extern DwVfpRegister d11; -extern DwVfpRegister d12; -extern DwVfpRegister d13; -extern DwVfpRegister d14; -extern DwVfpRegister d15; +const SwVfpRegister s0 = { 0 }; +const SwVfpRegister s1 = { 1 }; +const SwVfpRegister s2 = { 2 }; +const SwVfpRegister s3 = { 3 }; +const SwVfpRegister s4 = { 4 }; +const SwVfpRegister s5 = { 5 }; +const SwVfpRegister s6 = { 6 }; +const SwVfpRegister s7 = { 7 }; +const SwVfpRegister s8 = { 8 }; +const SwVfpRegister s9 = { 9 }; +const SwVfpRegister s10 = { 10 }; +const SwVfpRegister s11 = { 11 }; +const SwVfpRegister s12 = { 12 }; +const SwVfpRegister s13 = { 13 }; +const SwVfpRegister s14 = { 14 }; +const SwVfpRegister s15 = { 15 }; +const SwVfpRegister s16 = { 16 }; +const SwVfpRegister s17 = { 17 }; +const SwVfpRegister s18 = { 18 }; +const SwVfpRegister s19 = { 19 }; +const SwVfpRegister s20 = { 20 }; +const SwVfpRegister s21 = { 21 }; +const SwVfpRegister s22 = { 22 }; +const SwVfpRegister s23 = { 23 }; +const SwVfpRegister s24 = { 24 }; +const SwVfpRegister s25 = { 25 }; +const SwVfpRegister s26 = { 26 }; +const SwVfpRegister s27 = { 27 }; +const SwVfpRegister s28 = { 28 }; +const SwVfpRegister s29 = { 29 }; +const SwVfpRegister s30 = { 30 }; +const SwVfpRegister s31 = { 31 }; + +const DwVfpRegister d0 = { 0 }; +const DwVfpRegister d1 = { 1 }; +const DwVfpRegister d2 = { 2 }; +const DwVfpRegister d3 = { 3 }; +const DwVfpRegister d4 = { 4 }; +const DwVfpRegister d5 = { 5 }; +const DwVfpRegister d6 = { 6 }; +const DwVfpRegister d7 = { 7 }; +const DwVfpRegister d8 = { 8 }; +const DwVfpRegister d9 = { 9 }; +const DwVfpRegister d10 = { 10 }; +const DwVfpRegister d11 = { 11 }; +const DwVfpRegister d12 = { 12 }; +const DwVfpRegister d13 = { 13 }; +const DwVfpRegister d14 = { 14 }; +const DwVfpRegister d15 = { 15 }; // Coprocessor register @@ -210,23 +209,24 @@ struct CRegister { }; -extern CRegister no_creg; -extern CRegister cr0; -extern CRegister cr1; -extern CRegister cr2; -extern CRegister cr3; -extern CRegister cr4; -extern CRegister cr5; -extern CRegister cr6; -extern CRegister cr7; -extern CRegister cr8; -extern CRegister cr9; -extern CRegister cr10; -extern CRegister cr11; -extern CRegister cr12; -extern CRegister cr13; -extern CRegister cr14; -extern CRegister cr15; +const CRegister no_creg = { -1 }; + +const CRegister cr0 = { 0 }; +const CRegister cr1 = { 1 }; +const CRegister cr2 = { 2 }; +const CRegister cr3 = { 3 }; +const 
CRegister cr4 = { 4 }; +const CRegister cr5 = { 5 }; +const CRegister cr6 = { 6 }; +const CRegister cr7 = { 7 }; +const CRegister cr8 = { 8 }; +const CRegister cr9 = { 9 }; +const CRegister cr10 = { 10 }; +const CRegister cr11 = { 11 }; +const CRegister cr12 = { 12 }; +const CRegister cr13 = { 13 }; +const CRegister cr14 = { 14 }; +const CRegister cr15 = { 15 }; // Coprocessor number diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index edb1b0a..91e896d 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -61,10 +61,10 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, ASSERT(extra_args == NO_EXTRA_ARGUMENTS); } - // JumpToRuntime expects r0 to contain the number of arguments + // JumpToExternalReference expects r0 to contain the number of arguments // including the receiver and the extra arguments. __ add(r0, r0, Operand(num_extra_args + 1)); - __ JumpToRuntime(ExternalReference(id)); + __ JumpToExternalReference(ExternalReference(id)); } diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 6644d02..9e59582 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -31,6 +31,7 @@ #include "codegen-inl.h" #include "compiler.h" #include "debug.h" +#include "ic-inl.h" #include "parser.h" #include "register-allocator-inl.h" #include "runtime.h" @@ -142,6 +143,7 @@ CodeGenerator::CodeGenerator(MacroAssembler* masm) void CodeGenerator::Generate(CompilationInfo* info) { // Record the position for debugging purposes. CodeForFunctionPosition(info->function()); + Comment cmnt(masm_, "[ function compiled by virtual frame code generator"); // Initialize state. info_ = info; @@ -3321,6 +3323,25 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList* args) { } +// Generates the Math.pow method - currently just calls runtime. +void CodeGenerator::GenerateMathPow(ZoneList* args) { + ASSERT(args->length() == 2); + Load(args->at(0)); + Load(args->at(1)); + frame_->CallRuntime(Runtime::kMath_pow, 2); + frame_->EmitPush(r0); +} + + +// Generates the Math.sqrt method - currently just calls runtime. +void CodeGenerator::GenerateMathSqrt(ZoneList* args) { + ASSERT(args->length() == 1); + Load(args->at(0)); + frame_->CallRuntime(Runtime::kMath_sqrt, 1); + frame_->EmitPush(r0); +} + + // This should generate code that performs a charCodeAt() call or returns // undefined in order to trigger the slow case, Runtime_StringCharCodeAt. // It is not yet implemented on ARM, so it always goes to the slow case. @@ -3404,6 +3425,44 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList* args) { } +void CodeGenerator::GenerateCharFromCode(ZoneList* args) { + Comment(masm_, "[ GenerateCharFromCode"); + ASSERT(args->length() == 1); + + LoadAndSpill(args->at(0)); + frame_->EmitPop(r0); + + JumpTarget slow_case; + JumpTarget exit; + + // Fast case of Heap::LookupSingleCharacterStringFromCode. 
+ ASSERT(kSmiTag == 0); + ASSERT(kSmiShiftSize == 0); + ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); + __ tst(r0, Operand(kSmiTagMask | + ((~String::kMaxAsciiCharCode) << kSmiTagSize))); + slow_case.Branch(nz); + + ASSERT(kSmiTag == 0); + __ mov(r1, Operand(Factory::single_character_string_cache())); + __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ ldr(r1, MemOperand(r1, FixedArray::kHeaderSize - kHeapObjectTag)); + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(r1, ip); + slow_case.Branch(eq); + + frame_->EmitPush(r1); + exit.Jump(); + + slow_case.Bind(); + frame_->EmitPush(r0); + frame_->CallRuntime(Runtime::kCharFromCode, 1); + frame_->EmitPush(r0); + + exit.Bind(); +} + + void CodeGenerator::GenerateIsArray(ZoneList* args) { VirtualFrame::SpilledScope spilled_scope; ASSERT(args->length() == 1); @@ -3625,6 +3684,24 @@ void CodeGenerator::GenerateNumberToString(ZoneList* args) { } +void CodeGenerator::GenerateMathSin(ZoneList* args) { + ASSERT_EQ(args->length(), 1); + // Load the argument on the stack and jump to the runtime. + Load(args->at(0)); + frame_->CallRuntime(Runtime::kMath_sin, 1); + frame_->EmitPush(r0); +} + + +void CodeGenerator::GenerateMathCos(ZoneList* args) { + ASSERT_EQ(args->length(), 1); + // Load the argument on the stack and jump to the runtime. + Load(args->at(0)); + frame_->CallRuntime(Runtime::kMath_cos, 1); + frame_->EmitPush(r0); +} + + void CodeGenerator::GenerateObjectEquals(ZoneList* args) { VirtualFrame::SpilledScope spilled_scope; ASSERT(args->length() == 2); @@ -4489,7 +4566,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) { __ bind(&gc); __ push(cp); __ push(r3); - __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1); + __ TailCallRuntime(Runtime::kNewClosure, 2, 1); } @@ -4539,7 +4616,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { // Need to collect. Call into runtime system. __ bind(&gc); - __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1); + __ TailCallRuntime(Runtime::kNewContext, 1, 1); } @@ -4601,8 +4678,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { __ Ret(); __ bind(&slow_case); - ExternalReference runtime(Runtime::kCreateArrayLiteralShallow); - __ TailCallRuntime(runtime, 3, 1); + __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); } @@ -6170,12 +6246,17 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { } +Handle GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { + return Handle::null(); +} + + void StackCheckStub::Generate(MacroAssembler* masm) { // Do tail-call to runtime routine. Runtime routines expect at least one // argument, so give it a Smi. __ mov(r0, Operand(Smi::FromInt(0))); __ push(r0); - __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1); + __ TailCallRuntime(Runtime::kStackGuard, 1, 1); __ StubReturn(1); } @@ -6784,7 +6865,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { // by calling the runtime system. __ bind(&slow); __ push(r1); - __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1); + __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); } @@ -6887,7 +6968,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { // Do the runtime call to allocate the arguments object. 
__ bind(&runtime); - __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1); + __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); } @@ -7178,6 +7259,170 @@ void StringStubBase::GenerateCopyCharactersLong(MacroAssembler* masm, } +void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, + Register c1, + Register c2, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Register scratch5, + Label* not_found) { + // Register scratch3 is the general scratch register in this function. + Register scratch = scratch3; + + // Make sure that both characters are not digits as such strings has a + // different hash algorithm. Don't try to look for these in the symbol table. + Label not_array_index; + __ sub(scratch, c1, Operand(static_cast('0'))); + __ cmp(scratch, Operand(static_cast('9' - '0'))); + __ b(hi, ¬_array_index); + __ sub(scratch, c2, Operand(static_cast('0'))); + __ cmp(scratch, Operand(static_cast('9' - '0'))); + + // If check failed combine both characters into single halfword. + // This is required by the contract of the method: code at the + // not_found branch expects this combination in c1 register + __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls); + __ b(ls, not_found); + + __ bind(¬_array_index); + // Calculate the two character string hash. + Register hash = scratch1; + GenerateHashInit(masm, hash, c1); + GenerateHashAddCharacter(masm, hash, c2); + GenerateHashGetHash(masm, hash); + + // Collect the two characters in a register. + Register chars = c1; + __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte)); + + // chars: two character string, char 1 in byte 0 and char 2 in byte 1. + // hash: hash of two character string. + + // Load symbol table + // Load address of first element of the symbol table. + Register symbol_table = c2; + __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex); + + // Load undefined value + Register undefined = scratch4; + __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); + + // Calculate capacity mask from the symbol table capacity. + Register mask = scratch2; + __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset)); + __ mov(mask, Operand(mask, ASR, 1)); + __ sub(mask, mask, Operand(1)); + + // Calculate untagged address of the first element of the symbol table. + Register first_symbol_table_element = symbol_table; + __ add(first_symbol_table_element, symbol_table, + Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag)); + + // Registers + // chars: two character string, char 1 in byte 0 and char 2 in byte 1. + // hash: hash of two character string + // mask: capacity mask + // first_symbol_table_element: address of the first element of + // the symbol table + // scratch: - + + // Perform a number of probes in the symbol table. + static const int kProbes = 4; + Label found_in_symbol_table; + Label next_probe[kProbes]; + for (int i = 0; i < kProbes; i++) { + Register candidate = scratch5; // Scratch register contains candidate. + + // Calculate entry in symbol table. + if (i > 0) { + __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i))); + } else { + __ mov(candidate, hash); + } + + __ and_(candidate, candidate, Operand(mask)); + + // Load the entry from the symble table. + ASSERT_EQ(1, SymbolTable::kEntrySize); + __ ldr(candidate, + MemOperand(first_symbol_table_element, + candidate, + LSL, + kPointerSizeLog2)); + + // If entry is undefined no string with this hash can be found. 
+ __ cmp(candidate, undefined); + __ b(eq, not_found); + + // If length is not 2 the string is not a candidate. + __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset)); + __ cmp(scratch, Operand(2)); + __ b(ne, &next_probe[i]); + + // Check that the candidate is a non-external ascii string. + __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, + &next_probe[i]); + + // Check if the two characters match. + // Assumes that word load is little endian. + __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize)); + __ cmp(chars, scratch); + __ b(eq, &found_in_symbol_table); + __ bind(&next_probe[i]); + } + + // No matching 2 character string found by probing. + __ jmp(not_found); + + // Scratch register contains result when we fall through to here. + Register result = scratch; + __ bind(&found_in_symbol_table); + if (!result.is(r0)) { + __ mov(r0, result); + } +} + + +void StringStubBase::GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character) { + // hash = character + (character << 10); + __ add(hash, character, Operand(character, LSL, 10)); + // hash ^= hash >> 6; + __ eor(hash, hash, Operand(hash, ASR, 6)); +} + + +void StringStubBase::GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character) { + // hash += character; + __ add(hash, hash, Operand(character)); + // hash += hash << 10; + __ add(hash, hash, Operand(hash, LSL, 10)); + // hash ^= hash >> 6; + __ eor(hash, hash, Operand(hash, ASR, 6)); +} + + +void StringStubBase::GenerateHashGetHash(MacroAssembler* masm, + Register hash) { + // hash += hash << 3; + __ add(hash, hash, Operand(hash, LSL, 3)); + // hash ^= hash >> 11; + __ eor(hash, hash, Operand(hash, ASR, 11)); + // hash += hash << 15; + __ add(hash, hash, Operand(hash, LSL, 15), SetCC); + + // if (hash == 0) hash = 27; + __ mov(hash, Operand(27), LeaveCC, nz); +} + + void SubStringStub::Generate(MacroAssembler* masm) { Label runtime; @@ -7213,11 +7458,14 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ sub(r2, r2, Operand(r3), SetCC); __ b(mi, &runtime); // Fail if from > to. - // Handle sub-strings of length 2 and less in the runtime system. + // Special handling of sub-strings of length 1 and 2. One character strings + // are handled in the runtime system (looked up in the single character + // cache). Two character strings are looked for in the symbol cache. __ cmp(r2, Operand(2)); - __ b(le, &runtime); + __ b(lt, &runtime); // r2: length + // r3: from index (untaged smi) // r6: from (smi) // r7: to (smi) @@ -7231,6 +7479,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { // r1: instance type // r2: length + // r3: from index (untaged smi) // r5: string // r6: from (smi) // r7: to (smi) @@ -7257,6 +7506,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { // r1: instance type. // r2: length + // r3: from index (untaged smi) // r5: string // r6: from (smi) // r7: to (smi) @@ -7266,6 +7516,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { // r1: instance type. // r2: result string length. + // r3: from index (untaged smi) // r5: string. // r6: from offset (smi) // Check for flat ascii string. 
@@ -7274,6 +7525,35 @@ void SubStringStub::Generate(MacroAssembler* masm) { ASSERT_EQ(0, kTwoByteStringTag); __ b(eq, &non_ascii_flat); + Label result_longer_than_two; + __ cmp(r2, Operand(2)); + __ b(gt, &result_longer_than_two); + + // Sub string of length 2 requested. + // Get the two characters forming the sub string. + __ add(r5, r5, Operand(r3)); + __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize)); + __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1)); + + // Try to lookup two character string in symbol table. + Label make_two_character_string; + GenerateTwoCharacterSymbolTableProbe(masm, r3, r4, r1, r5, r6, r7, r9, + &make_two_character_string); + __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); + __ add(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); + + // r2: result string length. + // r3: two characters combined into halfword in little endian byte order. + __ bind(&make_two_character_string); + __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime); + __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); + __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); + __ add(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); + + __ bind(&result_longer_than_two); + // Allocate the result. __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime); @@ -7331,7 +7611,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { // Just jump to runtime to create the sub string. __ bind(&runtime); - __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1); + __ TailCallRuntime(Runtime::kSubString, 3, 1); } @@ -7422,7 +7702,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) { // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) // tagged as a small integer. __ bind(&runtime); - __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1); + __ TailCallRuntime(Runtime::kStringCompare, 2, 1); } @@ -7482,14 +7762,52 @@ void StringAddStub::Generate(MacroAssembler* masm) { // r4: first string instance type (if string_check_) // r5: second string instance type (if string_check_) // Look at the length of the result of adding the two strings. - Label string_add_flat_result; + Label string_add_flat_result, longer_than_two; // Adding two lengths can't overflow. ASSERT(String::kMaxLength * 2 > String::kMaxLength); __ add(r6, r2, Operand(r3)); // Use the runtime system when adding two one character strings, as it // contains optimizations for this specific case using the symbol table. __ cmp(r6, Operand(2)); - __ b(eq, &string_add_runtime); + __ b(ne, &longer_than_two); + + // Check that both strings are non-external ascii strings. + if (!string_check_) { + __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); + __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); + } + __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7, + &string_add_runtime); + + // Get the two characters forming the sub string. + __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); + __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize)); + + // Try to lookup two character string in symbol table. If it is not found + // just allocate a new one. 
+ Label make_two_character_string; + GenerateTwoCharacterSymbolTableProbe(masm, r2, r3, r6, r7, r4, r5, r9, + &make_two_character_string); + __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); + __ add(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + __ bind(&make_two_character_string); + // Resulting string has length 2 and first chars of two strings + // are combined into single halfword in r2 register. + // So we can fill resulting string without two loops by a single + // halfword store instruction (which assumes that processor is + // in a little endian mode) + __ mov(r6, Operand(2)); + __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime); + __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); + __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); + __ add(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + __ bind(&longer_than_two); // Check if resulting string will be flat. __ cmp(r6, Operand(String::kMinNonFlatLength)); __ b(lt, &string_add_flat_result); @@ -7568,6 +7886,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Both strings are sequential ASCII strings. We also know that they are // short (since the sum of the lengths is less than kMinNonFlatLength). + // r6: length of resulting flat string __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime); // Locate first character of result. __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); @@ -7636,7 +7955,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Just jump to runtime to add the two strings. __ bind(&string_add_runtime); - __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1); + __ TailCallRuntime(Runtime::kStringAdd, 2, 1); } diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index 2bc482e..bea98b6 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -370,6 +370,9 @@ class CodeGenerator: public AstVisitor { // Fast support for charCodeAt(n). void GenerateFastCharCodeAt(ZoneList* args); + // Fast support for string.charAt(n) and string[n]. + void GenerateCharFromCode(ZoneList* args); + // Fast support for object equality testing. void GenerateObjectEquals(ZoneList* args); @@ -393,6 +396,16 @@ class CodeGenerator: public AstVisitor { // Fast support for number to string. void GenerateNumberToString(ZoneList* args); + // Fast support for Math.pow(). + void GenerateMathPow(ZoneList* args); + + // Fast call to sine function. + void GenerateMathSin(ZoneList* args); + void GenerateMathCos(ZoneList* args); + + // Fast support for Math.pow(). + void GenerateMathSqrt(ZoneList* args); + // Simple condition analysis. enum ConditionAnalysis { ALWAYS_TRUE, @@ -554,6 +567,36 @@ class StringStubBase: public CodeStub { Register scratch4, Register scratch5, int flags); + + + // Probe the symbol table for a two character string. If the string is + // not found by probing a jump to the label not_found is performed. This jump + // does not guarantee that the string is not in the symbol table. If the + // string is found the code falls through with the string in register r0. + // Contents of both c1 and c2 registers are modified. At the exit c1 is + // guaranteed to contain halfword with low and high bytes equal to + // initial contents of c1 and c2 respectively. 
+ void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, + Register c1, + Register c2, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Register scratch5, + Label* not_found); + + // Generate string hash. + void GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character); + + void GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character); + + void GenerateHashGetHash(MacroAssembler* masm, + Register hash); }; diff --git a/deps/v8/src/arm/fast-codegen-arm.cc b/deps/v8/src/arm/fast-codegen-arm.cc index 0d322d1..5dedc29 100644 --- a/deps/v8/src/arm/fast-codegen-arm.cc +++ b/deps/v8/src/arm/fast-codegen-arm.cc @@ -40,6 +40,7 @@ Register FastCodeGenerator::accumulator0() { return r0; } Register FastCodeGenerator::accumulator1() { return r1; } Register FastCodeGenerator::scratch0() { return r3; } Register FastCodeGenerator::scratch1() { return r4; } +Register FastCodeGenerator::scratch2() { return r5; } Register FastCodeGenerator::receiver_reg() { return r2; } Register FastCodeGenerator::context_reg() { return cp; } @@ -100,7 +101,7 @@ void FastCodeGenerator::EmitThisPropertyStore(Handle name) { if (needs_write_barrier) { __ mov(scratch1(), Operand(offset)); - __ RecordWrite(scratch0(), scratch1(), ip); + __ RecordWrite(scratch0(), scratch1(), scratch2()); } if (destination().is(accumulator1())) { @@ -180,6 +181,7 @@ void FastCodeGenerator::EmitBitOr() { void FastCodeGenerator::Generate(CompilationInfo* compilation_info) { ASSERT(info_ == NULL); info_ = compilation_info; + Comment cmnt(masm_, "[ function compiled by fast code generator"); // Save the caller's frame pointer and set up our own. Comment prologue_cmnt(masm(), ";; Prologue"); diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index c37e29f..230818f 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -57,6 +57,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) { ASSERT(info_ == NULL); info_ = info; SetFunctionPosition(function()); + Comment cmnt(masm_, "[ function compiled by full code generator"); if (mode == PRIMARY) { int locals_count = scope()->num_stack_slots(); diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index 7ddb338..e68a77a 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -494,7 +494,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) { __ stm(db_w, sp, r2.bit() | r3.bit()); // Perform tail call to the entry. - __ TailCallRuntime(ExternalReference(IC_Utility(kLoadIC_Miss)), 2, 1); + ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss)); + __ TailCallExternalReference(ref, 2, 1); } @@ -531,7 +532,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { __ ldm(ia, sp, r2.bit() | r3.bit()); __ stm(db_w, sp, r2.bit() | r3.bit()); - __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedLoadIC_Miss)), 2, 1); + ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss)); + __ TailCallExternalReference(ref, 2, 1); } @@ -545,7 +547,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { __ ldm(ia, sp, r2.bit() | r3.bit()); __ stm(db_w, sp, r2.bit() | r3.bit()); - __ TailCallRuntime(ExternalReference(Runtime::kGetProperty), 2, 1); + __ TailCallRuntime(Runtime::kGetProperty, 2, 1); } @@ -662,7 +664,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { __ push(r0); // key // Perform tail call to the entry. 
- __ TailCallRuntime(ExternalReference( + __ TailCallExternalReference(ExternalReference( IC_Utility(kKeyedLoadPropertyWithInterceptor)), 2, 1); __ bind(&slow); @@ -681,7 +683,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { __ ldm(ia, sp, r2.bit() | r3.bit()); __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit()); - __ TailCallRuntime(ExternalReference(IC_Utility(kKeyedStoreIC_Miss)), 3, 1); + ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss)); + __ TailCallExternalReference(ref, 3, 1); } @@ -695,7 +698,7 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) { __ ldm(ia, sp, r1.bit() | r3.bit()); // r0 == value, r1 == key, r3 == object __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit()); - __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1); + __ TailCallRuntime(Runtime::kSetProperty, 3, 1); } @@ -854,7 +857,8 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) { __ stm(db_w, sp, r2.bit() | r0.bit()); // Perform tail call to the entry. - __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_Miss)), 3, 1); + ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss)); + __ TailCallExternalReference(ref, 3, 1); } @@ -897,7 +901,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) { __ push(receiver); __ push(value); - __ TailCallRuntime(ExternalReference(IC_Utility(kStoreIC_ArrayLength)), 2, 1); + ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength)); + __ TailCallExternalReference(ref, 2, 1); __ bind(&miss); diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index b249d69..36bebdf 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -220,7 +220,7 @@ void MacroAssembler::RecordWrite(Register object, Register offset, // remembered set bits in the new space. // object: heap object pointer (with tag) // offset: offset to store location from the object - and_(scratch, object, Operand(Heap::NewSpaceMask())); + and_(scratch, object, Operand(ExternalReference::new_space_mask())); cmp(scratch, Operand(ExternalReference::new_space_start())); b(eq, &done); @@ -1234,19 +1234,26 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext, } -void MacroAssembler::TailCallRuntime(const ExternalReference& ext, - int num_arguments, - int result_size) { +void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, + int num_arguments, + int result_size) { // TODO(1236192): Most runtime routines don't need the number of // arguments passed in because it is constant. At some point we // should remove this need and make the runtime routine entry code // smarter. mov(r0, Operand(num_arguments)); - JumpToRuntime(ext); + JumpToExternalReference(ext); +} + + +void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, + int num_arguments, + int result_size) { + TailCallExternalReference(ExternalReference(fid), num_arguments, result_size); } -void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) { +void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { #if defined(__thumb__) // Thumb mode builtin. 
ASSERT((reinterpret_cast(builtin.address()) & 1) == 1); @@ -1410,15 +1417,12 @@ void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings( ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); - int kFlatAsciiStringMask = - kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; - int kFlatAsciiStringTag = ASCII_STRING_TYPE; - and_(scratch1, scratch1, Operand(kFlatAsciiStringMask)); - and_(scratch2, scratch2, Operand(kFlatAsciiStringMask)); - cmp(scratch1, Operand(kFlatAsciiStringTag)); - // Ignore second test if first test failed. - cmp(scratch2, Operand(kFlatAsciiStringTag), eq); - b(ne, failure); + + JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1, + scratch2, + scratch1, + scratch2, + failure); } void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, @@ -1439,6 +1443,36 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, } +void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( + Register first, + Register second, + Register scratch1, + Register scratch2, + Label* failure) { + int kFlatAsciiStringMask = + kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; + int kFlatAsciiStringTag = ASCII_STRING_TYPE; + and_(scratch1, first, Operand(kFlatAsciiStringMask)); + and_(scratch2, second, Operand(kFlatAsciiStringMask)); + cmp(scratch1, Operand(kFlatAsciiStringTag)); + // Ignore second test if first test failed. + cmp(scratch2, Operand(kFlatAsciiStringTag), eq); + b(ne, failure); +} + + +void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, + Register scratch, + Label* failure) { + int kFlatAsciiStringMask = + kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; + int kFlatAsciiStringTag = ASCII_STRING_TYPE; + and_(scratch, type, Operand(kFlatAsciiStringMask)); + cmp(scratch, Operand(kFlatAsciiStringTag)); + b(ne, failure); +} + + #ifdef ENABLE_DEBUGGER_SUPPORT CodePatcher::CodePatcher(byte* address, int instructions) : address_(address), diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 98cea16..5d9e513 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -333,7 +333,6 @@ class MacroAssembler: public Assembler { void StubReturn(int argc); // Call a runtime routine. - // Eventually this should be used for all C calls. void CallRuntime(Runtime::Function* f, int num_arguments); // Convenience function: Same as above, but takes the fid instead. @@ -344,14 +343,19 @@ class MacroAssembler: public Assembler { int num_arguments); // Tail call of a runtime routine (jump). - // Like JumpToRuntime, but also takes care of passing the number + // Like JumpToExternalReference, but also takes care of passing the number // of parameters. - void TailCallRuntime(const ExternalReference& ext, + void TailCallExternalReference(const ExternalReference& ext, + int num_arguments, + int result_size); + + // Convenience function: tail call a runtime routine (jump). + void TailCallRuntime(Runtime::FunctionId fid, int num_arguments, int result_size); // Jump to a runtime routine. - void JumpToRuntime(const ExternalReference& builtin); + void JumpToExternalReference(const ExternalReference& builtin); // Invoke specified builtin JavaScript function. Adds an entry to // the unresolved list if the name does not resolve. 
@@ -421,6 +425,22 @@ class MacroAssembler: public Assembler { Register scratch2, Label* not_flat_ascii_strings); + // Checks if both instance types are sequential ASCII strings and jumps to + // label if either is not. + void JumpIfBothInstanceTypesAreNotSequentialAscii( + Register first_object_instance_type, + Register second_object_instance_type, + Register scratch1, + Register scratch2, + Label* failure); + + // Check if instance type is sequential ASCII string and jump to label if + // it is not. + void JumpIfInstanceTypeIsNotSequentialAscii(Register type, + Register scratch, + Label* failure); + + private: void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index 9dd3b93..f621be4 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -765,7 +765,7 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { Label grow_failed; // Call GrowStack(backtrack_stackpointer()) - int num_arguments = 2; + static const int num_arguments = 2; FrameAlign(num_arguments, r0); __ mov(r0, backtrack_stackpointer()); __ add(r1, frame_pointer(), Operand(kStackHighEnd)); @@ -966,7 +966,7 @@ void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) { // Private methods: void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) { - int num_arguments = 3; + static const int num_arguments = 3; FrameAlign(num_arguments, scratch); // RegExp code frame pointer. __ mov(r2, frame_pointer()); diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index da73942..5d5b2a5 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -297,7 +297,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, __ push(receiver_reg); __ mov(r2, Operand(Handle(transition))); __ stm(db_w, sp, r2.bit() | r0.bit()); - __ TailCallRuntime( + __ TailCallExternalReference( ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)), 3, 1); return; @@ -529,7 +529,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED { ExternalReference ref = ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); - __ TailCallRuntime(ref, 5, 1); + __ TailCallExternalReference(ref, 5, 1); __ bind(&cleanup); __ pop(scratch1); @@ -549,7 +549,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED { ExternalReference ref = ExternalReference( IC_Utility(IC::kLoadPropertyWithInterceptorForLoad)); - __ TailCallRuntime(ref, 5, 1); + __ TailCallExternalReference(ref, 5, 1); } private: @@ -719,7 +719,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object, // Do tail-call to the runtime system. ExternalReference load_callback_property = ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); - __ TailCallRuntime(load_callback_property, 5, 1); + __ TailCallExternalReference(load_callback_property, 5, 1); return true; } @@ -1204,7 +1204,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object, // Do tail-call to the runtime system. ExternalReference store_callback_property = ExternalReference(IC_Utility(IC::kStoreCallbackProperty)); - __ TailCallRuntime(store_callback_property, 4, 1); + __ TailCallExternalReference(store_callback_property, 4, 1); // Handle store cache miss. 
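The stub-cache conversions above follow the renamed API split: TailCallRuntime now only takes a Runtime::FunctionId and forwards to TailCallExternalReference, which is the entry point that accepts an arbitrary ExternalReference such as an IC utility. A toy model of that delegation, with stand-in types rather than the real MacroAssembler interface:

#include <cstdio>
#include <string>

// Stand-ins; names are illustrative only.
struct ExternalReference { std::string target; };

static void TailCallExternalReference(const ExternalReference& ext,
                                      int num_arguments) {
  std::printf("tail call %s with %d args\n", ext.target.c_str(), num_arguments);
}

// Convenience wrapper, mirroring the new TailCallRuntime(Runtime::FunctionId).
static void TailCallRuntime(const std::string& runtime_function, int num_arguments) {
  ExternalReference ref = {runtime_function};
  TailCallExternalReference(ref, num_arguments);
}

int main() {
  TailCallRuntime("Runtime::kSetProperty", 3);
  ExternalReference ic = {"IC_Utility(kStoreIC_Miss)"};
  TailCallExternalReference(ic, 3);
  return 0;
}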
__ bind(&miss); @@ -1251,7 +1251,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver, // Do tail-call to the runtime system. ExternalReference store_ic_property = ExternalReference(IC_Utility(IC::kStoreInterceptorProperty)); - __ TailCallRuntime(store_ic_property, 3, 1); + __ TailCallExternalReference(store_ic_property, 3, 1); // Handle store cache miss. __ bind(&miss); diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc index 6e1a47f..ab6e5f8 100644 --- a/deps/v8/src/arm/virtual-frame-arm.cc +++ b/deps/v8/src/arm/virtual-frame-arm.cc @@ -35,27 +35,8 @@ namespace v8 { namespace internal { -// ------------------------------------------------------------------------- -// VirtualFrame implementation. - #define __ ACCESS_MASM(masm()) - -// On entry to a function, the virtual frame already contains the -// receiver and the parameters. All initial frame elements are in -// memory. -VirtualFrame::VirtualFrame() - : elements_(parameter_count() + local_count() + kPreallocatedElements), - stack_pointer_(parameter_count()) { // 0-based index of TOS. - for (int i = 0; i <= stack_pointer_; i++) { - elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown)); - } - for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { - register_locations_[i] = kIllegalIndex; - } -} - - void VirtualFrame::SyncElementBelowStackPointer(int index) { UNREACHABLE(); } @@ -314,7 +295,7 @@ void VirtualFrame::EmitPop(Register reg) { void VirtualFrame::EmitPush(Register reg) { ASSERT(stack_pointer_ == element_count() - 1); - elements_.Add(FrameElement::MemoryElement(NumberInfo::kUnknown)); + elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown())); stack_pointer_++; __ push(reg); } diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h index f69bddf..6eb0811 100644 --- a/deps/v8/src/arm/virtual-frame-arm.h +++ b/deps/v8/src/arm/virtual-frame-arm.h @@ -59,7 +59,7 @@ class VirtualFrame : public ZoneObject { static const int kIllegalIndex = -1; // Construct an initial virtual frame on entry to a JS function. - VirtualFrame(); + inline VirtualFrame(); // Construct a virtual frame as a clone of an existing one. explicit inline VirtualFrame(VirtualFrame* original); @@ -69,7 +69,7 @@ class VirtualFrame : public ZoneObject { // Create a duplicate of an existing valid frame element. FrameElement CopyElementAt(int index, - NumberInfo::Type info = NumberInfo::kUnknown); + NumberInfo info = NumberInfo::Unknown()); // The number of elements on the virtual frame. int element_count() { return elements_.length(); } @@ -344,7 +344,7 @@ class VirtualFrame : public ZoneObject { void EmitPushMultiple(int count, int src_regs); // Push an element on the virtual frame. 
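The virtual-frame changes above replace the enum constant NumberInfo::kUnknown with a call to NumberInfo::Unknown(), i.e. NumberInfo becomes a small value object with named factory functions. The real class is defined in number-info.h and carries more states; the following is only a minimal illustrative sketch of that shape.

#include <cstdio>

// Illustrative stand-in for V8's NumberInfo value class.
class NumberInfo {
 public:
  static NumberInfo Unknown() { return NumberInfo(kUnknownType); }
  static NumberInfo Smi()     { return NumberInfo(kSmiType); }

  bool IsUnknown() const { return type_ == kUnknownType; }
  bool IsSmi() const     { return type_ == kSmiType; }

 private:
  enum Type { kUnknownType, kSmiType };
  explicit NumberInfo(Type type) : type_(type) {}
  Type type_;
};

int main() {
  NumberInfo info = NumberInfo::Unknown();  // default for a pushed register
  std::printf("unknown=%d smi=%d\n", info.IsUnknown(), info.IsSmi());
  return 0;
}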
- inline void Push(Register reg, NumberInfo::Type info = NumberInfo::kUnknown); + inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown()); inline void Push(Handle value); inline void Push(Smi* value); diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index c28a662..e33c280 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -1149,6 +1149,8 @@ function SetupArray() { ArrayReduce: 1, ArrayReduceRight: 1 }); + + %FinishArrayPrototypeSetup($Array.prototype); } diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index 96d516f..aaf10ef 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -579,6 +579,11 @@ ExternalReference ExternalReference::random_positive_smi_function() { } +ExternalReference ExternalReference::transcendental_cache_array_address() { + return ExternalReference(TranscendentalCache::cache_array_address()); +} + + ExternalReference ExternalReference::keyed_lookup_cache_keys() { return ExternalReference(KeyedLookupCache::keys_address()); } @@ -619,6 +624,11 @@ ExternalReference ExternalReference::new_space_start() { } +ExternalReference ExternalReference::new_space_mask() { + return ExternalReference(reinterpret_cast
(Heap::NewSpaceMask())); +} + + ExternalReference ExternalReference::new_space_allocation_top_address() { return ExternalReference(Heap::NewSpaceAllocationTopAddress()); } diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index f401306..cde7d69 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -37,7 +37,6 @@ #include "runtime.h" #include "top.h" -#include "zone-inl.h" #include "token.h" namespace v8 { @@ -400,6 +399,7 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference perform_gc_function(); static ExternalReference random_positive_smi_function(); + static ExternalReference transcendental_cache_array_address(); // Static data in the keyed lookup cache. static ExternalReference keyed_lookup_cache_keys(); @@ -427,6 +427,7 @@ class ExternalReference BASE_EMBEDDED { // Static variable Heap::NewSpaceStart() static ExternalReference new_space_start(); + static ExternalReference new_space_mask(); static ExternalReference heap_always_allocate_scope_depth(); // Used for fast allocation in generated code. diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index 7cb5578..062a5c6 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -67,8 +67,6 @@ VariableProxy::VariableProxy(Handle name, inside_with_(inside_with) { // names must be canonicalized for fast equality checks ASSERT(name->IsSymbol()); - // at least one access, otherwise no need for a VariableProxy - var_uses_.RecordRead(1); } @@ -87,8 +85,7 @@ void VariableProxy::BindTo(Variable* var) { // eval() etc. Const-ness and variable declarations are a complete mess // in JS. Sigh... var_ = var; - var->var_uses()->RecordUses(&var_uses_); - var->obj_uses()->RecordUses(&obj_uses_); + var->set_is_used(true); } diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index f2171cc..13502dc 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -117,6 +117,9 @@ typedef ZoneList > ZoneObjectList; class AstNode: public ZoneObject { public: + static const int kNoNumber = -1; + + AstNode() : num_(kNoNumber) {} virtual ~AstNode() { } virtual void Accept(AstVisitor* v) = 0; @@ -141,6 +144,13 @@ class AstNode: public ZoneObject { virtual ObjectLiteral* AsObjectLiteral() { return NULL; } virtual ArrayLiteral* AsArrayLiteral() { return NULL; } virtual CompareOperation* AsCompareOperation() { return NULL; } + + int num() { return num_; } + void set_num(int n) { num_ = n; } + + private: + // Support for ast node numbering. + int num_; }; @@ -181,9 +191,10 @@ class Expression: public AstNode { kTestValue }; - static const int kNoLabel = -1; - - Expression() : num_(kNoLabel), def_(NULL), defined_vars_(NULL) {} + Expression() + : bitfields_(0), + def_(NULL), + defined_vars_(NULL) {} virtual Expression* AsExpression() { return this; } @@ -211,11 +222,6 @@ class Expression: public AstNode { // Static type information for this expression. StaticType* type() { return &type_; } - int num() { return num_; } - - // AST node numbering ordered by evaluation order. - void set_num(int n) { num_ = n; } - // Data flow information. DefinitionInfo* var_def() { return def_; } void set_var_def(DefinitionInfo* def) { def_ = def; } @@ -225,11 +231,36 @@ class Expression: public AstNode { defined_vars_ = defined_vars; } + // AST analysis results + + // True if the expression rooted at this node can be compiled by the + // side-effect free compiler. 
+ bool side_effect_free() { return SideEffectFreeField::decode(bitfields_); } + void set_side_effect_free(bool is_side_effect_free) { + bitfields_ &= ~SideEffectFreeField::mask(); + bitfields_ |= SideEffectFreeField::encode(is_side_effect_free); + } + + // Will ToInt32 (ECMA 262-3 9.5) or ToUint32 (ECMA 262-3 9.6) + // be applied to the value of this expression? + // If so, we may be able to optimize the calculation of the value. + bool to_int32() { return ToInt32Field::decode(bitfields_); } + void set_to_int32(bool to_int32) { + bitfields_ &= ~ToInt32Field::mask(); + bitfields_ |= ToInt32Field::encode(to_int32); + } + + private: + uint32_t bitfields_; StaticType type_; - int num_; + DefinitionInfo* def_; ZoneList* defined_vars_; + + // Using template BitField. + class SideEffectFreeField : public BitField {}; + class ToInt32Field : public BitField {}; }; @@ -931,6 +962,10 @@ class VariableProxy: public Expression { return var()->is_global() || var()->rewrite()->IsLeaf(); } + // Reading from a mutable variable is a side effect, but 'this' is + // immutable. + virtual bool IsTrivial() { return is_this(); } + bool IsVariable(Handle n) { return !is_this() && name().is_identical_to(n); } @@ -942,8 +977,6 @@ class VariableProxy: public Expression { Handle name() const { return name_; } Variable* var() const { return var_; } - UseCount* var_uses() { return &var_uses_; } - UseCount* obj_uses() { return &obj_uses_; } bool is_this() const { return is_this_; } bool inside_with() const { return inside_with_; } @@ -956,10 +989,6 @@ class VariableProxy: public Expression { bool is_this_; bool inside_with_; - // VariableProxy usage info. - UseCount var_uses_; // uses of the variable value - UseCount obj_uses_; // uses of the object the variable points to - VariableProxy(Handle name, bool is_this, bool inside_with); explicit VariableProxy(bool is_this); @@ -1018,6 +1047,8 @@ class Slot: public Expression { virtual bool IsLeaf() { return true; } + bool IsStackAllocated() { return type_ == PARAMETER || type_ == LOCAL; } + // Accessors Variable* var() const { return var_; } Type type() const { return type_; } diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index a7cf421..12efbc1 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -1050,6 +1050,19 @@ bool Genesis::InstallNatives() { script->set_type(Smi::FromInt(Script::TYPE_NATIVE)); global_context()->set_empty_script(*script); } + { + // Builtin function for OpaqueReference -- a JSValue-based object, + // that keeps its field isolated from JavaScript code. It may store + // objects, that JavaScript code may not access. + Handle opaque_reference_fun = + InstallFunction(builtins, "OpaqueReference", JS_VALUE_TYPE, + JSValue::kSize, Top::initial_object_prototype(), + Builtins::Illegal, false); + Handle prototype = + Factory::NewJSObject(Top::object_function(), TENURED); + SetPrototype(opaque_reference_fun, prototype); + global_context()->set_opaque_reference_function(*opaque_reference_fun); + } if (FLAG_natives_file == NULL) { // Without natives file, install default natives. 
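The side_effect_free and to_int32 flags above are packed into the single bitfields_ word through V8's BitField template (its template arguments were lost in this listing; the usual shape is BitField<Type, shift, size>). A self-contained sketch of the same encode/decode pattern, using a minimal re-implementation of BitField rather than the real header:

#include <cstdint>
#include <cstdio>

// Minimal stand-in for V8's BitField<T, shift, size> helper.
template <class T, int shift, int size>
class BitField {
 public:
  static uint32_t mask() { return ((1u << size) - 1) << shift; }
  static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
  static T decode(uint32_t value) {
    return static_cast<T>((value & mask()) >> shift);
  }
};

// The two analysis flags, packed the way Expression packs them.
typedef BitField<bool, 0, 1> SideEffectFreeField;
typedef BitField<bool, 1, 1> ToInt32Field;

int main() {
  uint32_t bitfields = 0;
  bitfields &= ~SideEffectFreeField::mask();
  bitfields |= SideEffectFreeField::encode(true);   // set_side_effect_free(true)
  bitfields &= ~ToInt32Field::mask();
  bitfields |= ToInt32Field::encode(false);         // set_to_int32(false)
  std::printf("side_effect_free=%d to_int32=%d\n",
              SideEffectFreeField::decode(bitfields),
              ToInt32Field::decode(bitfields));
  return 0;
}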
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index 8e88c28..a8ba818 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -242,6 +242,109 @@ BUILTIN(ArrayCodeGeneric) { } +static Object* AllocateJSArray() { + JSFunction* array_function = + Top::context()->global_context()->array_function(); + Object* result = Heap::AllocateJSObject(array_function); + if (result->IsFailure()) return result; + return result; +} + + +static Object* AllocateEmptyJSArray() { + Object* result = AllocateJSArray(); + if (result->IsFailure()) return result; + JSArray* result_array = JSArray::cast(result); + result_array->set_length(Smi::FromInt(0)); + result_array->set_elements(Heap::empty_fixed_array()); + return result_array; +} + + +static void CopyElements(AssertNoAllocation* no_gc, + FixedArray* dst, + int dst_index, + FixedArray* src, + int src_index, + int len) { + ASSERT(dst != src); // Use MoveElements instead. + memcpy(dst->data_start() + dst_index, + src->data_start() + src_index, + len * kPointerSize); + WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc); + if (mode == UPDATE_WRITE_BARRIER) { + Heap::RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len); + } +} + + +static void MoveElements(AssertNoAllocation* no_gc, + FixedArray* dst, + int dst_index, + FixedArray* src, + int src_index, + int len) { + memmove(dst->data_start() + dst_index, + src->data_start() + src_index, + len * kPointerSize); + WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc); + if (mode == UPDATE_WRITE_BARRIER) { + Heap::RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len); + } +} + + +static void FillWithHoles(FixedArray* dst, int from, int to) { + MemsetPointer(dst->data_start() + from, Heap::the_hole_value(), to - from); +} + + +static bool ArrayPrototypeHasNoElements() { + // This method depends on non writability of Object and Array prototype + // fields. + Context* global_context = Top::context()->global_context(); + // Array.prototype + JSObject* proto = + JSObject::cast(global_context->array_function()->prototype()); + if (proto->elements() != Heap::empty_fixed_array()) return false; + // Hidden prototype + proto = JSObject::cast(proto->GetPrototype()); + ASSERT(proto->elements() == Heap::empty_fixed_array()); + // Object.prototype + proto = JSObject::cast(proto->GetPrototype()); + if (proto != global_context->initial_object_prototype()) return false; + if (proto->elements() != Heap::empty_fixed_array()) return false; + ASSERT(proto->GetPrototype()->IsNull()); + return true; +} + + +static Object* CallJsBuiltin(const char* name, + BuiltinArguments args) { + HandleScope handleScope; + + Handle js_builtin = + GetProperty(Handle(Top::global_context()->builtins()), + name); + ASSERT(js_builtin->IsJSFunction()); + Handle function(Handle::cast(js_builtin)); + Vector argv(Vector::New(args.length() - 1)); + int n_args = args.length() - 1; + for (int i = 0; i < n_args; i++) { + argv[i] = &args[i + 1]; + } + bool pending_exception = false; + Handle result = Execution::Call(function, + args.receiver(), + n_args, + argv.start(), + &pending_exception); + argv.Dispose(); + if (pending_exception) return Failure::Exception(); + return *result; +} + + BUILTIN(ArrayPush) { JSArray* array = JSArray::cast(*args.receiver()); ASSERT(array->HasFastElements()); @@ -261,22 +364,21 @@ BUILTIN(ArrayPush) { if (new_length > elms->length()) { // New backing storage is needed. 
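ArrayPush (and the other fast-path builtins that follow) grow the backing FixedArray to new_length + (new_length >> 1) + 16, roughly 1.5 times the required length plus a small constant, then fill the unused tail with holes explicitly since the storage is now allocated uninitialized. A small worked example of the capacity formula:

#include <cstdio>

// Capacity rule used by the fast array builtins: ~1.5 * new_length + 16.
static int NewCapacity(int new_length) {
  return new_length + (new_length >> 1) + 16;
}

int main() {
  int lengths[] = {1, 10, 100, 1000};
  // Prints 1 -> 17, 10 -> 31, 100 -> 166, 1000 -> 1516.
  for (int i = 0; i < 4; i++) {
    std::printf("%d -> %d\n", lengths[i], NewCapacity(lengths[i]));
  }
  return 0;
}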
int capacity = new_length + (new_length >> 1) + 16; - Object* obj = Heap::AllocateFixedArrayWithHoles(capacity); + Object* obj = Heap::AllocateUninitializedFixedArray(capacity); if (obj->IsFailure()) return obj; + FixedArray* new_elms = FixedArray::cast(obj); AssertNoAllocation no_gc; - FixedArray* new_elms = FixedArray::cast(obj); - WriteBarrierMode mode = new_elms->GetWriteBarrierMode(no_gc); - // Fill out the new array with old elements. - for (int i = 0; i < len; i++) new_elms->set(i, elms->get(i), mode); + CopyElements(&no_gc, new_elms, 0, elms, 0, len); + FillWithHoles(new_elms, new_length, capacity); + elms = new_elms; array->set_elements(elms); } + // Add the provided values. AssertNoAllocation no_gc; WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); - - // Add the provided values. for (int index = 0; index < to_add; index++) { elms->set(index + len, args[index + 1], mode); } @@ -290,10 +392,9 @@ BUILTIN(ArrayPush) { BUILTIN(ArrayPop) { JSArray* array = JSArray::cast(*args.receiver()); ASSERT(array->HasFastElements()); - Object* undefined = Heap::undefined_value(); int len = Smi::cast(array->length())->value(); - if (len == 0) return undefined; + if (len == 0) return Heap::undefined_value(); // Get top element FixedArray* elms = FixedArray::cast(array->elements()); @@ -318,41 +419,28 @@ BUILTIN(ArrayPop) { } -static Object* GetElementToMove(uint32_t index, - FixedArray* elms, - JSObject* prototype) { - Object* e = elms->get(index); - if (e->IsTheHole() && prototype->HasElement(index)) { - e = prototype->GetElement(index); +BUILTIN(ArrayShift) { + if (!ArrayPrototypeHasNoElements()) { + return CallJsBuiltin("ArrayShift", args); } - return e; -} - -BUILTIN(ArrayShift) { JSArray* array = JSArray::cast(*args.receiver()); ASSERT(array->HasFastElements()); int len = Smi::cast(array->length())->value(); if (len == 0) return Heap::undefined_value(); - // Fetch the prototype. - JSFunction* array_function = - Top::context()->global_context()->array_function(); - JSObject* prototype = JSObject::cast(array_function->prototype()); - FixedArray* elms = FixedArray::cast(array->elements()); // Get first element Object* first = elms->get(0); if (first->IsTheHole()) { - first = prototype->GetElement(0); + first = Heap::undefined_value(); } // Shift the elements. - for (int i = 0; i < len - 1; i++) { - elms->set(i, GetElementToMove(i + 1, elms, prototype)); - } + AssertNoAllocation no_gc; + MoveElements(&no_gc, elms, 0, elms, 1, len - 1); elms->set(len - 1, Heap::the_hole_value()); // Set the length. @@ -363,6 +451,10 @@ BUILTIN(ArrayShift) { BUILTIN(ArrayUnshift) { + if (!ArrayPrototypeHasNoElements()) { + return CallJsBuiltin("ArrayUnshift", args); + } + JSArray* array = JSArray::cast(*args.receiver()); ASSERT(array->HasFastElements()); @@ -379,38 +471,22 @@ BUILTIN(ArrayUnshift) { FixedArray* elms = FixedArray::cast(array->elements()); - // Fetch the prototype. - JSFunction* array_function = - Top::context()->global_context()->array_function(); - JSObject* prototype = JSObject::cast(array_function->prototype()); - if (new_length > elms->length()) { // New backing storage is needed. 
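CopyElements and MoveElements are thin wrappers over memcpy and memmove on the FixedArray payload, followed by a write-barrier update when required; the memmove variant is what lets ArrayShift slide the surviving tail left by one slot even though source and destination overlap. A stand-alone sketch of that shift step on a plain array of pointer-sized slots, with an illustrative hole marker instead of Heap::the_hole_value():

#include <cstring>
#include <cstdio>
#include <stdint.h>

static const intptr_t kTheHole = -1;  // illustrative stand-in for the hole value

// Shift elements [1..len) down to [0..len-1) and put a hole in the last slot,
// the way BUILTIN(ArrayShift) does with MoveElements plus set(len - 1, hole).
static void ShiftLeft(intptr_t* elms, int len) {
  if (len == 0) return;
  std::memmove(elms, elms + 1, (len - 1) * sizeof(intptr_t));  // overlapping ranges
  elms[len - 1] = kTheHole;
}

int main() {
  intptr_t elms[] = {10, 20, 30, 40};
  ShiftLeft(elms, 4);
  for (intptr_t e : elms) std::printf("%ld ", static_cast<long>(e));  // 20 30 40 -1
  std::printf("\n");
  return 0;
}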
int capacity = new_length + (new_length >> 1) + 16; - Object* obj = Heap::AllocateFixedArrayWithHoles(capacity); + Object* obj = Heap::AllocateUninitializedFixedArray(capacity); if (obj->IsFailure()) return obj; + FixedArray* new_elms = FixedArray::cast(obj); AssertNoAllocation no_gc; - FixedArray* new_elms = FixedArray::cast(obj); - WriteBarrierMode mode = new_elms->GetWriteBarrierMode(no_gc); - // Fill out the new array with old elements. - for (int i = 0; i < len; i++) - new_elms->set(to_add + i, - GetElementToMove(i, elms, prototype), - mode); + CopyElements(&no_gc, new_elms, to_add, elms, 0, len); + FillWithHoles(new_elms, new_length, capacity); elms = new_elms; array->set_elements(elms); } else { AssertNoAllocation no_gc; - WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); - - // Move elements to the right - for (int i = 0; i < len; i++) { - elms->set(new_length - i - 1, - GetElementToMove(len - i - 1, elms, prototype), - mode); - } + MoveElements(&no_gc, elms, to_add, elms, 0, len); } // Add the provided values. @@ -426,32 +502,11 @@ BUILTIN(ArrayUnshift) { } -static Object* CallJsBuiltin(const char* name, - BuiltinArguments args) { - HandleScope handleScope; - - Handle js_builtin = - GetProperty(Handle(Top::global_context()->builtins()), - name); - ASSERT(js_builtin->IsJSFunction()); - Handle function(Handle::cast(js_builtin)); - Vector argv(Vector::New(args.length() - 1)); - int n_args = args.length() - 1; - for (int i = 0; i < n_args; i++) { - argv[i] = &args[i + 1]; +BUILTIN(ArraySlice) { + if (!ArrayPrototypeHasNoElements()) { + return CallJsBuiltin("ArraySlice", args); } - bool pending_exception = false; - Handle result = Execution::Call(function, - args.receiver(), - n_args, - argv.start(), - &pending_exception); - if (pending_exception) return Failure::Exception(); - return *result; -} - -BUILTIN(ArraySlice) { JSArray* array = JSArray::cast(*args.receiver()); ASSERT(array->HasFastElements()); @@ -460,21 +515,21 @@ BUILTIN(ArraySlice) { int n_arguments = args.length() - 1; // Note carefully choosen defaults---if argument is missing, - // it's undefined which gets converted to 0 for relativeStart - // and to len for relativeEnd. - int relativeStart = 0; - int relativeEnd = len; + // it's undefined which gets converted to 0 for relative_start + // and to len for relative_end. + int relative_start = 0; + int relative_end = len; if (n_arguments > 0) { Object* arg1 = args[1]; if (arg1->IsSmi()) { - relativeStart = Smi::cast(arg1)->value(); + relative_start = Smi::cast(arg1)->value(); } else if (!arg1->IsUndefined()) { return CallJsBuiltin("ArraySlice", args); } if (n_arguments > 1) { Object* arg2 = args[2]; if (arg2->IsSmi()) { - relativeEnd = Smi::cast(arg2)->value(); + relative_end = Smi::cast(arg2)->value(); } else if (!arg2->IsUndefined()) { return CallJsBuiltin("ArraySlice", args); } @@ -482,43 +537,31 @@ BUILTIN(ArraySlice) { } // ECMAScript 232, 3rd Edition, Section 15.4.4.10, step 6. - int k = (relativeStart < 0) ? Max(len + relativeStart, 0) - : Min(relativeStart, len); + int k = (relative_start < 0) ? Max(len + relative_start, 0) + : Min(relative_start, len); // ECMAScript 232, 3rd Edition, Section 15.4.4.10, step 8. - int final = (relativeEnd < 0) ? Max(len + relativeEnd, 0) - : Min(relativeEnd, len); + int final = (relative_end < 0) ? Max(len + relative_end, 0) + : Min(relative_end, len); // Calculate the length of result array. 
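The renamed relative_start and relative_end are clamped exactly as ES3 section 15.4.4.10 steps 6 and 8 prescribe: negative values count back from the end and everything is pinned to the range [0, len]. The same clamping as a stand-alone function, with two worked values:

#include <algorithm>
#include <cstdio>

// ES3 15.4.4.10 steps 6 and 8, as in BUILTIN(ArraySlice).
static int ClampIndex(int relative_index, int len) {
  return relative_index < 0 ? std::max(len + relative_index, 0)
                            : std::min(relative_index, len);
}

int main() {
  int len = 5;
  // [1,2,3,4,5].slice(-2)    -> k = 3, final = 5, result length 2
  std::printf("k=%d final=%d\n", ClampIndex(-2, len), ClampIndex(len, len));
  // [1,2,3,4,5].slice(1, -1) -> k = 1, final = 4, result length 3
  std::printf("k=%d final=%d\n", ClampIndex(1, len), ClampIndex(-1, len));
  return 0;
}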
int result_len = final - k; - if (result_len < 0) { - result_len = 0; + if (result_len <= 0) { + return AllocateEmptyJSArray(); } - JSFunction* array_function = - Top::context()->global_context()->array_function(); - Object* result = Heap::AllocateJSObject(array_function); + Object* result = AllocateJSArray(); if (result->IsFailure()) return result; JSArray* result_array = JSArray::cast(result); - result = Heap::AllocateFixedArrayWithHoles(result_len); + result = Heap::AllocateUninitializedFixedArray(result_len); if (result->IsFailure()) return result; FixedArray* result_elms = FixedArray::cast(result); FixedArray* elms = FixedArray::cast(array->elements()); - // Fetch the prototype. - JSObject* prototype = JSObject::cast(array_function->prototype()); - AssertNoAllocation no_gc; - WriteBarrierMode mode = result_elms->GetWriteBarrierMode(no_gc); - - // Fill newly created array. - for (int i = 0; i < result_len; i++) { - result_elms->set(i, - GetElementToMove(k + i, elms, prototype), - mode); - } + CopyElements(&no_gc, result_elms, 0, elms, k, result_len); // Set elements. result_array->set_elements(result_elms); @@ -530,6 +573,10 @@ BUILTIN(ArraySlice) { BUILTIN(ArraySplice) { + if (!ArrayPrototypeHasNoElements()) { + return CallJsBuiltin("ArraySplice", args); + } + JSArray* array = JSArray::cast(*args.receiver()); ASSERT(array->HasFastElements()); @@ -546,118 +593,111 @@ BUILTIN(ArraySplice) { return Heap::undefined_value(); } - int relativeStart = 0; + int relative_start = 0; Object* arg1 = args[1]; if (arg1->IsSmi()) { - relativeStart = Smi::cast(arg1)->value(); + relative_start = Smi::cast(arg1)->value(); } else if (!arg1->IsUndefined()) { return CallJsBuiltin("ArraySplice", args); } - int actualStart = (relativeStart < 0) ? Max(len + relativeStart, 0) - : Min(relativeStart, len); + int actual_start = (relative_start < 0) ? Max(len + relative_start, 0) + : Min(relative_start, len); // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is // given differently from when an undefined delete count is given. // This does not follow ECMA-262, but we do the same for // compatibility. - int deleteCount = len; + int delete_count = len; if (n_arguments > 1) { Object* arg2 = args[2]; if (arg2->IsSmi()) { - deleteCount = Smi::cast(arg2)->value(); + delete_count = Smi::cast(arg2)->value(); } else { return CallJsBuiltin("ArraySplice", args); } } - int actualDeleteCount = Min(Max(deleteCount, 0), len - actualStart); - - JSFunction* array_function = - Top::context()->global_context()->array_function(); - - // Allocate result array. - Object* result = Heap::AllocateJSObject(array_function); - if (result->IsFailure()) return result; - JSArray* result_array = JSArray::cast(result); - - result = Heap::AllocateFixedArrayWithHoles(actualDeleteCount); - if (result->IsFailure()) return result; - FixedArray* result_elms = FixedArray::cast(result); + int actual_delete_count = Min(Max(delete_count, 0), len - actual_start); FixedArray* elms = FixedArray::cast(array->elements()); - // Fetch the prototype. - JSObject* prototype = JSObject::cast(array_function->prototype()); + JSArray* result_array = NULL; + if (actual_delete_count == 0) { + Object* result = AllocateEmptyJSArray(); + if (result->IsFailure()) return result; + result_array = JSArray::cast(result); + } else { + // Allocate result array. 
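In ArraySplice, delete_count starts out as len, so a one-argument splice(start) deletes everything from start to the end (the SpiderMonkey/TraceMonkey/JSC-compatible behaviour the comment calls out), and the count is then clamped to what is actually available: min(max(delete_count, 0), len - actual_start). Two worked values:

#include <algorithm>
#include <cstdio>

static int ActualDeleteCount(int delete_count, int len, int actual_start) {
  return std::min(std::max(delete_count, 0), len - actual_start);
}

int main() {
  int len = 6;
  // splice(2) with no delete count: delete_count defaults to len, clamps to 4.
  std::printf("%d\n", ActualDeleteCount(len, len, 2));   // 4
  // splice(2, 10): asks for 10, but only 4 elements remain after index 2.
  std::printf("%d\n", ActualDeleteCount(10, len, 2));    // 4
  return 0;
}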
+ Object* result = AllocateJSArray(); + if (result->IsFailure()) return result; + result_array = JSArray::cast(result); - AssertNoAllocation no_gc; - WriteBarrierMode mode = result_elms->GetWriteBarrierMode(no_gc); + result = Heap::AllocateUninitializedFixedArray(actual_delete_count); + if (result->IsFailure()) return result; + FixedArray* result_elms = FixedArray::cast(result); - // Fill newly created array. - for (int k = 0; k < actualDeleteCount; k++) { - result_elms->set(k, - GetElementToMove(actualStart + k, elms, prototype), - mode); - } + AssertNoAllocation no_gc; + // Fill newly created array. + CopyElements(&no_gc, + result_elms, 0, + elms, actual_start, + actual_delete_count); - // Set elements. - result_array->set_elements(result_elms); + // Set elements. + result_array->set_elements(result_elms); - // Set the length. - result_array->set_length(Smi::FromInt(actualDeleteCount)); + // Set the length. + result_array->set_length(Smi::FromInt(actual_delete_count)); + } - int itemCount = (n_arguments > 1) ? (n_arguments - 2) : 0; + int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0; - int new_length = len - actualDeleteCount + itemCount; + int new_length = len - actual_delete_count + item_count; - mode = elms->GetWriteBarrierMode(no_gc); - if (itemCount < actualDeleteCount) { + if (item_count < actual_delete_count) { // Shrink the array. - for (int k = actualStart; k < (len - actualDeleteCount); k++) { - elms->set(k + itemCount, - GetElementToMove(k + actualDeleteCount, elms, prototype), - mode); - } - - for (int k = len; k > new_length; k--) { - elms->set(k - 1, Heap::the_hole_value()); - } - } else if (itemCount > actualDeleteCount) { + AssertNoAllocation no_gc; + MoveElements(&no_gc, + elms, actual_start + item_count, + elms, actual_start + actual_delete_count, + (len - actual_delete_count - actual_start)); + FillWithHoles(elms, new_length, len); + } else if (item_count > actual_delete_count) { // Currently fixed arrays cannot grow too big, so // we should never hit this case. - ASSERT((itemCount - actualDeleteCount) <= (Smi::kMaxValue - len)); - - FixedArray* source_elms = elms; + ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len)); // Check if array need to grow. if (new_length > elms->length()) { // New backing storage is needed. int capacity = new_length + (new_length >> 1) + 16; - Object* obj = Heap::AllocateFixedArrayWithHoles(capacity); + Object* obj = Heap::AllocateUninitializedFixedArray(capacity); if (obj->IsFailure()) return obj; - FixedArray* new_elms = FixedArray::cast(obj); - mode = new_elms->GetWriteBarrierMode(no_gc); - // Copy the part before actualStart as is. - for (int k = 0; k < actualStart; k++) { - new_elms->set(k, elms->get(k), mode); - } + AssertNoAllocation no_gc; + // Copy the part before actual_start as is. 
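Both the grow path and the in-place path relocate the tail that survives the deletion so that it starts at actual_start + item_count; the grow path additionally copies the prefix [0, actual_start) into the new backing store and fills the slack with holes. The index arithmetic, with one worked example:

#include <cstdio>

int main() {
  // splice(2, 1, x, y) on a length-6 array:
  int len = 6, actual_start = 2, actual_delete_count = 1, item_count = 2;

  int new_length = len - actual_delete_count + item_count;    // 7
  int tail_len   = len - actual_delete_count - actual_start;  // 3 surviving tail elements
  int tail_from  = actual_start + actual_delete_count;        // old index 3
  int tail_to    = actual_start + item_count;                 // new index 4

  std::printf("new_length=%d tail [%d..%d) -> [%d..%d)\n",
              new_length, tail_from, tail_from + tail_len,
              tail_to, tail_to + tail_len);
  return 0;
}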
+ CopyElements(&no_gc, new_elms, 0, elms, 0, actual_start); + CopyElements(&no_gc, + new_elms, actual_start + item_count, + elms, actual_start + actual_delete_count, + (len - actual_delete_count - actual_start)); + FillWithHoles(new_elms, new_length, capacity); - source_elms = elms; elms = new_elms; array->set_elements(elms); - } - - for (int k = len - actualDeleteCount; k > actualStart; k--) { - elms->set(k + itemCount - 1, - GetElementToMove(k + actualDeleteCount - 1, - source_elms, - prototype), - mode); + } else { + AssertNoAllocation no_gc; + MoveElements(&no_gc, + elms, actual_start + item_count, + elms, actual_start + actual_delete_count, + (len - actual_delete_count - actual_start)); } } - for (int k = actualStart; k < actualStart + itemCount; k++) { - elms->set(k, args[3 + k - actualStart], mode); + AssertNoAllocation no_gc; + WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); + for (int k = actual_start; k < actual_start + item_count; k++) { + elms->set(k, args[3 + k - actual_start], mode); } // Set the length. diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index 4d0fd29..e42f758 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -83,6 +83,11 @@ void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) { } +int CodeStub::GetCodeKind() { + return Code::STUB; +} + + Handle CodeStub::GetCode() { Code* code; if (!FindCodeInCache(&code)) { @@ -97,7 +102,10 @@ Handle CodeStub::GetCode() { masm.GetCode(&desc); // Copy the generated code into a heap object. - Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop()); + Code::Flags flags = Code::ComputeFlags( + static_cast(GetCodeKind()), + InLoop(), + GetICState()); Handle new_object = Factory::NewCode(desc, NULL, flags, masm.CodeObject()); RecordCodeGeneration(*new_object, &masm); @@ -132,7 +140,10 @@ Object* CodeStub::TryGetCode() { masm.GetCode(&desc); // Try to copy the generated code into a heap object. - Code::Flags flags = Code::ComputeFlags(Code::STUB, InLoop()); + Code::Flags flags = Code::ComputeFlags( + static_cast(GetCodeKind()), + InLoop(), + GetICState()); Object* new_object = Heap::CreateCode(desc, NULL, flags, masm.CodeObject()); if (new_object->IsFailure()) return new_object; diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index 3901a64..de2ad56 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -28,6 +28,8 @@ #ifndef V8_CODE_STUBS_H_ #define V8_CODE_STUBS_H_ +#include "globals.h" + namespace v8 { namespace internal { @@ -48,6 +50,7 @@ namespace internal { V(FastNewClosure) \ V(FastNewContext) \ V(FastCloneShallowArray) \ + V(TranscendentalCache) \ V(GenericUnaryOp) \ V(RevertToNumber) \ V(ToBoolean) \ @@ -138,6 +141,14 @@ class CodeStub BASE_EMBEDDED { // lazily generated function should be fully optimized or not. virtual InLoopFlag InLoop() { return NOT_IN_LOOP; } + // GenericBinaryOpStub needs to override this. + virtual int GetCodeKind(); + + // GenericBinaryOpStub needs to override this. + virtual InlineCacheState GetICState() { + return UNINITIALIZED; + } + // Returns a name for logging/debugging purposes. 
virtual const char* GetName() { return MajorName(MajorKey(), false); } diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index 5e25f69..6841c21 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -369,6 +369,7 @@ CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = { {&CodeGenerator::GenerateValueOf, "_ValueOf"}, {&CodeGenerator::GenerateSetValueOf, "_SetValueOf"}, {&CodeGenerator::GenerateFastCharCodeAt, "_FastCharCodeAt"}, + {&CodeGenerator::GenerateCharFromCode, "_CharFromCode"}, {&CodeGenerator::GenerateObjectEquals, "_ObjectEquals"}, {&CodeGenerator::GenerateLog, "_Log"}, {&CodeGenerator::GenerateRandomPositiveSmi, "_RandomPositiveSmi"}, @@ -380,6 +381,10 @@ CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = { {&CodeGenerator::GenerateStringCompare, "_StringCompare"}, {&CodeGenerator::GenerateRegExpExec, "_RegExpExec"}, {&CodeGenerator::GenerateNumberToString, "_NumberToString"}, + {&CodeGenerator::GenerateMathPow, "_Math_pow"}, + {&CodeGenerator::GenerateMathSin, "_Math_sin"}, + {&CodeGenerator::GenerateMathCos, "_Math_cos"}, + {&CodeGenerator::GenerateMathSqrt, "_Math_sqrt"}, }; diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc index 54273673..9dcbeb5 100644 --- a/deps/v8/src/compilation-cache.cc +++ b/deps/v8/src/compilation-cache.cc @@ -32,28 +32,23 @@ namespace v8 { namespace internal { - // The number of sub caches covering the different types to cache. static const int kSubCacheCount = 4; // The number of generations for each sub cache. -#if defined(ANDROID) -static const int kScriptGenerations = 1; -static const int kEvalGlobalGenerations = 1; -static const int kEvalContextualGenerations = 1; -static const int kRegExpGenerations = 1; -#else // The number of ScriptGenerations is carefully chosen based on histograms. // See issue 458: http://code.google.com/p/v8/issues/detail?id=458 static const int kScriptGenerations = 5; static const int kEvalGlobalGenerations = 2; static const int kEvalContextualGenerations = 2; static const int kRegExpGenerations = 2; -#endif // Initial size of each compilation cache table allocated. static const int kInitialCacheSize = 64; +// Index for the first generation in the cache. +static const int kFirstGeneration = 0; + // The compilation cache consists of several generational sub-caches which uses // this class as a base class. A sub-cache contains a compilation cache tables // for each generation of the sub-cache. Since the same source code string has @@ -70,6 +65,15 @@ class CompilationSubCache { // Get the compilation cache tables for a specific generation. Handle GetTable(int generation); + // Accessors for first generation. + Handle GetFirstTable() { + return GetTable(kFirstGeneration); + } + void SetFirstTable(Handle value) { + ASSERT(kFirstGeneration < generations_); + tables_[kFirstGeneration] = *value; + } + // Age the sub-cache by evicting the oldest generation and creating a new // young generation. void Age(); @@ -104,6 +108,10 @@ class CompilationCacheScript : public CompilationSubCache { void Put(Handle source, Handle boilerplate); private: + // Note: Returns a new hash table if operation results in expansion. + Handle TablePut(Handle source, + Handle boilerplate); + bool HasOrigin(Handle boilerplate, Handle name, int line_offset, @@ -125,6 +133,12 @@ class CompilationCacheEval: public CompilationSubCache { Handle context, Handle boilerplate); + private: + // Note: Returns a new hash table if operation results in expansion. 
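The new TablePut helpers exist because a CompilationCacheTable may be replaced by a larger table when an insertion triggers expansion; Put therefore re-installs whatever table the insertion returns as the first generation via SetFirstTable(TablePut(...)) instead of assuming the old handle is still the live table. The same pattern in miniature, using an ordinary map behind a shared_ptr as a toy stand-in for the hash table:

#include <memory>
#include <map>
#include <string>
#include <cstdio>

typedef std::map<std::string, std::string> Table;

// Like CompilationCacheTable::Put: may return a brand new table, so the
// caller must not keep using its old reference.
static std::shared_ptr<Table> TablePut(std::shared_ptr<Table> table,
                                       const std::string& key,
                                       const std::string& value) {
  if (table->size() >= 2) {                        // pretend we hit the load factor
    std::shared_ptr<Table> grown(new Table(*table));
    (*grown)[key] = value;
    return grown;                                  // expansion: new table object
  }
  (*table)[key] = value;
  return table;                                    // no expansion: same table
}

int main() {
  std::shared_ptr<Table> first_generation(new Table());
  // Always re-install the result, mirroring SetFirstTable(TablePut(...)).
  first_generation = TablePut(first_generation, "a", "1");
  first_generation = TablePut(first_generation, "b", "2");
  first_generation = TablePut(first_generation, "c", "3");  // triggers the "expansion"
  std::printf("entries=%zu\n", first_generation->size());
  return 0;
}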
+ Handle TablePut(Handle source, + Handle context, + Handle boilerplate); + DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval); }; @@ -140,6 +154,11 @@ class CompilationCacheRegExp: public CompilationSubCache { void Put(Handle source, JSRegExp::Flags flags, Handle data); + private: + // Note: Returns a new hash table if operation results in expansion. + Handle TablePut(Handle source, + JSRegExp::Flags flags, + Handle data); DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp); }; @@ -287,12 +306,19 @@ Handle CompilationCacheScript::Lookup(Handle source, } +Handle CompilationCacheScript::TablePut( + Handle source, + Handle boilerplate) { + CALL_HEAP_FUNCTION(GetFirstTable()->Put(*source, *boilerplate), + CompilationCacheTable); +} + + void CompilationCacheScript::Put(Handle source, Handle boilerplate) { HandleScope scope; ASSERT(boilerplate->IsBoilerplate()); - Handle table = GetTable(0); - CALL_HEAP_FUNCTION_VOID(table->Put(*source, *boilerplate)); + SetFirstTable(TablePut(source, boilerplate)); } @@ -326,13 +352,21 @@ Handle CompilationCacheEval::Lookup(Handle source, } +Handle CompilationCacheEval::TablePut( + Handle source, + Handle context, + Handle boilerplate) { + CALL_HEAP_FUNCTION(GetFirstTable()->PutEval(*source, *context, *boilerplate), + CompilationCacheTable); +} + + void CompilationCacheEval::Put(Handle source, Handle context, Handle boilerplate) { HandleScope scope; ASSERT(boilerplate->IsBoilerplate()); - Handle table = GetTable(0); - CALL_HEAP_FUNCTION_VOID(table->PutEval(*source, *context, *boilerplate)); + SetFirstTable(TablePut(source, context, boilerplate)); } @@ -366,12 +400,20 @@ Handle CompilationCacheRegExp::Lookup(Handle source, } +Handle CompilationCacheRegExp::TablePut( + Handle source, + JSRegExp::Flags flags, + Handle data) { + CALL_HEAP_FUNCTION(GetFirstTable()->PutRegExp(*source, flags, *data), + CompilationCacheTable); +} + + void CompilationCacheRegExp::Put(Handle source, JSRegExp::Flags flags, Handle data) { HandleScope scope; - Handle table = GetTable(0); - CALL_HEAP_FUNCTION_VOID(table->PutRegExp(*source, flags, *data)); + SetFirstTable(TablePut(source, flags, data)); } diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index 557a91e..ebb62f1 100755 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -31,14 +31,14 @@ #include "codegen-inl.h" #include "compilation-cache.h" #include "compiler.h" +#include "data-flow.h" #include "debug.h" #include "fast-codegen.h" #include "full-codegen.h" +#include "liveedit.h" #include "oprofile-agent.h" #include "rewriter.h" #include "scopes.h" -#include "usage-analyzer.h" -#include "liveedit.h" namespace v8 { namespace internal { @@ -48,7 +48,7 @@ static Handle MakeCode(Handle context, CompilationInfo* info) { FunctionLiteral* function = info->function(); ASSERT(function != NULL); // Rewrite the AST by introducing .result assignments where needed. - if (!Rewriter::Process(function) || !AnalyzeVariableUsage(function)) { + if (!Rewriter::Process(function)) { // Signal a stack overflow by returning a null handle. The stack // overflow exception will be thrown by the caller. return Handle::null(); @@ -79,6 +79,17 @@ static Handle MakeCode(Handle context, CompilationInfo* info) { return Handle::null(); } + if (FLAG_use_flow_graph) { + FlowGraphBuilder builder; + builder.Build(function); + +#ifdef DEBUG + if (FLAG_print_graph_text) { + builder.graph()->PrintText(builder.postorder()); + } +#endif + } + // Generate code and return it. 
Code generator selection is governed by // which backends are enabled and whether the function is considered // run-once code or not: @@ -117,6 +128,14 @@ static Handle MakeCode(Handle context, CompilationInfo* info) { } +#ifdef ENABLE_DEBUGGER_SUPPORT +Handle MakeCodeForLiveEdit(CompilationInfo* info) { + Handle context = Handle::null(); + return MakeCode(context, info); +} +#endif + + static Handle MakeFunction(bool is_global, bool is_eval, Compiler::ValidationState validate, @@ -224,7 +243,7 @@ static Handle MakeFunction(bool is_global, #ifdef ENABLE_DEBUGGER_SUPPORT // Notify debugger - Debugger::OnAfterCompile(script, fun); + Debugger::OnAfterCompile(script, Debugger::NO_AFTER_COMPILE_FLAGS); #endif return fun; @@ -444,6 +463,17 @@ Handle Compiler::BuildBoilerplate(FunctionLiteral* literal, return Handle::null(); } + if (FLAG_use_flow_graph) { + FlowGraphBuilder builder; + builder.Build(literal); + +#ifdef DEBUG + if (FLAG_print_graph_text) { + builder.graph()->PrintText(builder.postorder()); + } +#endif + } + // Generate code and return it. The way that the compilation mode // is controlled by the command-line flags is described in // the static helper function MakeCode. diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h index f01889d..8e220e6 100644 --- a/deps/v8/src/compiler.h +++ b/deps/v8/src/compiler.h @@ -276,6 +276,13 @@ class Compiler : public AllStatic { }; +#ifdef ENABLE_DEBUGGER_SUPPORT + +Handle MakeCodeForLiveEdit(CompilationInfo* info); + +#endif + + // During compilation we need a global list of handles to constants // for frame elements. When the zone gets deleted, we make sure to // clear this list of handles as well. diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h index 9baf072..98ebc47 100644 --- a/deps/v8/src/contexts.h +++ b/deps/v8/src/contexts.h @@ -95,6 +95,7 @@ enum ContextLookupFlags { call_as_constructor_delegate) \ V(EMPTY_SCRIPT_INDEX, Script, empty_script) \ V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \ + V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \ V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \ V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \ V(MAP_CACHE_INDEX, Object, map_cache) \ @@ -216,6 +217,7 @@ class Context: public FixedArray { CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, EMPTY_SCRIPT_INDEX, SCRIPT_FUNCTION_INDEX, + OPAQUE_REFERENCE_FUNCTION_INDEX, CONTEXT_EXTENSION_FUNCTION_INDEX, OUT_OF_MEMORY_INDEX, MAP_CACHE_INDEX, diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h index ba7220a..f7210d5 100644 --- a/deps/v8/src/conversions-inl.h +++ b/deps/v8/src/conversions-inl.h @@ -59,6 +59,32 @@ static inline int FastD2I(double x) { } +// The fast double-to-unsigned-int conversion routine does not guarantee +// rounding towards zero. +static inline unsigned int FastD2UI(double x) { + // There is no unsigned version of lrint, so there is no fast path + // in this function as there is in FastD2I. Using lrint doesn't work + // for values of 2^31 and above. + + // Convert "small enough" doubles to uint32_t by fixing the 32 + // least significant non-fractional bits in the low 32 bits of the + // double, and reading them from there. + const double k2Pow52 = 4503599627370496.0; + bool negative = x < 0; + if (negative) { + x = -x; + } + if (x < k2Pow52) { + x += k2Pow52; + uint32_t result; + memcpy(&result, &x, sizeof(result)); // Copy low 32 bits. + return negative ? ~result + 1 : result; + } + // Large number (outside uint32 range), Infinity or NaN. 
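FastD2UI relies on the layout of IEEE doubles: adding 2^52 to a non-negative value below 2^52 forces the integer bits into the low word of the double, from where the 32 least significant bits can be read back with memcpy; negative inputs are handled by two's-complementing the magnitude, and anything too large, Infinity or NaN yields 0x80000000. The same routine as a stand-alone program with a few probe values; as in the hunk above, the memcpy assumes the low 32 bits of the double come first in memory (little-endian).

#include <cstring>
#include <cstdio>
#include <stdint.h>

static unsigned int FastD2UI(double x) {
  const double k2Pow52 = 4503599627370496.0;  // 2^52
  bool negative = x < 0;
  if (negative) x = -x;
  if (x < k2Pow52) {
    x += k2Pow52;                              // fix the integer bits in the low word
    uint32_t result;
    std::memcpy(&result, &x, sizeof(result));  // copy low 32 bits
    return negative ? ~result + 1 : result;    // two's complement for negatives
  }
  return 0x80000000u;                          // out of range, Infinity or NaN
}

int main() {
  // Expected on a little-endian host: 3 4294967295 4294967295
  std::printf("%u %u %u\n", FastD2UI(3.0), FastD2UI(-1.0), FastD2UI(4294967295.0));
  return 0;
}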
+ return 0x80000000u; // Return integer indefinite. +} + + static inline double DoubleToInteger(double x) { if (isnan(x)) return 0; if (!isfinite(x) || x == 0) return x; diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h index 67f7d53..bdc7e44 100644 --- a/deps/v8/src/conversions.h +++ b/deps/v8/src/conversions.h @@ -32,11 +32,12 @@ namespace v8 { namespace internal { -// The fast double-to-int conversion routine does not guarantee +// The fast double-to-(unsigned-)int conversion routine does not guarantee // rounding towards zero. // The result is unspecified if x is infinite or NaN, or if the rounded // integer value is outside the range of type int. static inline int FastD2I(double x); +static inline unsigned int FastD2UI(double x); static inline double FastI2D(int x) { diff --git a/deps/v8/src/data-flow.cc b/deps/v8/src/data-flow.cc index 5e9d217..6b45da0 100644 --- a/deps/v8/src/data-flow.cc +++ b/deps/v8/src/data-flow.cc @@ -33,6 +33,540 @@ namespace v8 { namespace internal { +void FlowGraph::AppendInstruction(AstNode* instruction) { + ASSERT(instruction != NULL); + if (is_empty() || !exit()->IsBlockNode()) { + AppendNode(new BlockNode()); + } + BlockNode::cast(exit())->AddInstruction(instruction); +} + + +void FlowGraph::AppendNode(Node* node) { + ASSERT(node != NULL); + if (is_empty()) { + entry_ = exit_ = node; + } else { + exit()->AddSuccessor(node); + node->AddPredecessor(exit()); + exit_ = node; + } +} + + +void FlowGraph::AppendGraph(FlowGraph* graph) { + ASSERT(!graph->is_empty()); + if (is_empty()) { + entry_ = graph->entry(); + exit_ = graph->exit(); + } else { + exit()->AddSuccessor(graph->entry()); + graph->entry()->AddPredecessor(exit()); + exit_ = graph->exit(); + } +} + + +void FlowGraph::Split(BranchNode* branch, + FlowGraph* left, + FlowGraph* right, + JoinNode* merge) { + // Graphs are in edge split form. Add empty blocks if necessary. + if (left->is_empty()) left->AppendNode(new BlockNode()); + if (right->is_empty()) right->AppendNode(new BlockNode()); + + // Add the branch, left flowgraph and merge. + AppendNode(branch); + AppendGraph(left); + AppendNode(merge); + + // Splice in the right flowgraph. + right->AppendNode(merge); + branch->AddSuccessor(right->entry()); + right->entry()->AddPredecessor(branch); +} + + +void FlowGraph::Loop(JoinNode* merge, + FlowGraph* condition, + BranchNode* branch, + FlowGraph* body) { + // Add the merge, condition and branch. Add merge's predecessors in + // left-to-right order. + AppendNode(merge); + body->AppendNode(merge); + AppendGraph(condition); + AppendNode(branch); + + // Splice in the body flowgraph. 
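FlowGraph::Split wires the classic if/else diamond: the branch node gets both arms as successors, both arms flow into the join node, and empty arms get an empty block so the graph stays in edge-split form. A tiny stand-alone illustration of the resulting shape; the Node type here is a stand-in, not the real V8 classes.

#include <cstdio>
#include <vector>
#include <string>

struct Node {
  std::string name;
  std::vector<Node*> successors;
  explicit Node(const std::string& n) : name(n) {}
  void AddSuccessor(Node* s) { successors.push_back(s); }
};

int main() {
  Node entry("entry"), branch("branch"), then_block("then"),
       else_block("else"), join("join");

  // The wiring produced by AppendNode/AppendGraph/Split for an IfStatement.
  entry.AddSuccessor(&branch);
  branch.AddSuccessor(&then_block);   // left arm, appended first
  branch.AddSuccessor(&else_block);   // right arm, spliced in afterwards
  then_block.AddSuccessor(&join);
  else_block.AddSuccessor(&join);

  Node* nodes[] = {&entry, &branch, &then_block, &else_block, &join};
  for (Node* n : nodes) {
    std::printf("%s ->", n->name.c_str());
    for (Node* s : n->successors) std::printf(" %s", s->name.c_str());
    std::printf("\n");
  }
  return 0;
}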
+ branch->AddSuccessor(body->entry()); + body->entry()->AddPredecessor(branch); +} + + +void EntryNode::Traverse(bool mark, + ZoneList* preorder, + ZoneList* postorder) { + ASSERT(successor_ != NULL); + preorder->Add(this); + if (!successor_->IsMarkedWith(mark)) { + successor_->MarkWith(mark); + successor_->Traverse(mark, preorder, postorder); + } + postorder->Add(this); +} + + +void ExitNode::Traverse(bool mark, + ZoneList* preorder, + ZoneList* postorder) { + preorder->Add(this); + postorder->Add(this); +} + + +void BlockNode::Traverse(bool mark, + ZoneList* preorder, + ZoneList* postorder) { + ASSERT(successor_ != NULL); + preorder->Add(this); + if (!successor_->IsMarkedWith(mark)) { + successor_->MarkWith(mark); + successor_->Traverse(mark, preorder, postorder); + } + postorder->Add(this); +} + + +void BranchNode::Traverse(bool mark, + ZoneList* preorder, + ZoneList* postorder) { + ASSERT(successor0_ != NULL && successor1_ != NULL); + preorder->Add(this); + if (!successor0_->IsMarkedWith(mark)) { + successor0_->MarkWith(mark); + successor0_->Traverse(mark, preorder, postorder); + } + if (!successor1_->IsMarkedWith(mark)) { + successor1_->MarkWith(mark); + successor1_->Traverse(mark, preorder, postorder); + } + postorder->Add(this); +} + + +void JoinNode::Traverse(bool mark, + ZoneList* preorder, + ZoneList* postorder) { + ASSERT(successor_ != NULL); + preorder->Add(this); + if (!successor_->IsMarkedWith(mark)) { + successor_->MarkWith(mark); + successor_->Traverse(mark, preorder, postorder); + } + postorder->Add(this); +} + + +void FlowGraphBuilder::Build(FunctionLiteral* lit) { + graph_ = FlowGraph::Empty(); + graph_.AppendNode(new EntryNode()); + global_exit_ = new ExitNode(); + VisitStatements(lit->body()); + + if (HasStackOverflow()) { + graph_ = FlowGraph::Empty(); + return; + } + + graph_.AppendNode(global_exit_); + + // Build preorder and postorder traversal orders. All the nodes in + // the graph have the same mark flag. For the traversal, use that + // flag's negation. Traversal will flip all the flags. + bool mark = graph_.entry()->IsMarkedWith(false); + graph_.entry()->MarkWith(mark); + graph_.entry()->Traverse(mark, &preorder_, &postorder_); +} + + +void FlowGraphBuilder::VisitDeclaration(Declaration* decl) { + UNREACHABLE(); +} + + +void FlowGraphBuilder::VisitBlock(Block* stmt) { + VisitStatements(stmt->statements()); +} + + +void FlowGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) { + Visit(stmt->expression()); +} + + +void FlowGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) { + // Nothing to do. 
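The Traverse methods use a per-node boolean mark instead of a separate visited set: Build reads the entry node's current mark, flips it, and a node counts as visited once its mark matches the new value, so a full traversal simply inverts every mark and no clearing pass is needed. A minimal stand-alone version of that idea on a three-node graph:

#include <cstdio>
#include <vector>

struct Node {
  bool mark = false;
  std::vector<Node*> successors;
  bool IsMarkedWith(bool m) const { return mark == m; }
  void MarkWith(bool m) { mark = m; }
};

// Depth-first traversal in the style of FlowGraph: preorder on entry,
// postorder on exit, with the flipped mark value acting as the visited flag.
static void Traverse(Node* n, bool mark,
                     std::vector<Node*>* preorder, std::vector<Node*>* postorder) {
  preorder->push_back(n);
  for (Node* s : n->successors) {
    if (!s->IsMarkedWith(mark)) {
      s->MarkWith(mark);
      Traverse(s, mark, preorder, postorder);
    }
  }
  postorder->push_back(n);
}

int main() {
  Node a, b, c;
  a.successors = {&b, &c};
  b.successors = {&c};
  std::vector<Node*> pre, post;
  bool mark = a.IsMarkedWith(false);  // i.e. the negation of the current mark, as in Build()
  a.MarkWith(mark);
  Traverse(&a, mark, &pre, &post);
  std::printf("preorder=%zu postorder=%zu\n", pre.size(), post.size());  // 3 3
  return 0;
}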
+} + + +void FlowGraphBuilder::VisitIfStatement(IfStatement* stmt) { + Visit(stmt->condition()); + + BranchNode* branch = new BranchNode(); + FlowGraph original = graph_; + graph_ = FlowGraph::Empty(); + Visit(stmt->then_statement()); + + FlowGraph left = graph_; + graph_ = FlowGraph::Empty(); + Visit(stmt->else_statement()); + + JoinNode* join = new JoinNode(); + original.Split(branch, &left, &graph_, join); + graph_ = original; +} + + +void FlowGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) { + SetStackOverflow(); +} + + +void FlowGraphBuilder::VisitBreakStatement(BreakStatement* stmt) { + SetStackOverflow(); +} + + +void FlowGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) { + Visit(stmt->expression()); + graph_.AppendInstruction(stmt); + graph_.AppendNode(global_exit()); +} + + +void FlowGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) { + Visit(stmt->expression()); + graph_.AppendInstruction(stmt); +} + + +void FlowGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) { + graph_.AppendInstruction(stmt); +} + + +void FlowGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { + SetStackOverflow(); +} + + +void FlowGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) { + JoinNode* join = new JoinNode(); + FlowGraph original = graph_; + graph_ = FlowGraph::Empty(); + Visit(stmt->body()); + + FlowGraph body = graph_; + graph_ = FlowGraph::Empty(); + Visit(stmt->cond()); + + BranchNode* branch = new BranchNode(); + + // Add body, condition and branch. + original.AppendNode(join); + original.AppendGraph(&body); + original.AppendGraph(&graph_); // The condition. + original.AppendNode(branch); + + // Tie the knot. + branch->AddSuccessor(join); + join->AddPredecessor(branch); + + graph_ = original; +} + + +void FlowGraphBuilder::VisitWhileStatement(WhileStatement* stmt) { + JoinNode* join = new JoinNode(); + FlowGraph original = graph_; + graph_ = FlowGraph::Empty(); + Visit(stmt->cond()); + + BranchNode* branch = new BranchNode(); + FlowGraph condition = graph_; + graph_ = FlowGraph::Empty(); + Visit(stmt->body()); + + original.Loop(join, &condition, branch, &graph_); + graph_ = original; +} + + +void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) { + if (stmt->init() != NULL) Visit(stmt->init()); + + JoinNode* join = new JoinNode(); + FlowGraph original = graph_; + graph_ = FlowGraph::Empty(); + if (stmt->cond() != NULL) Visit(stmt->cond()); + + BranchNode* branch = new BranchNode(); + FlowGraph condition = graph_; + graph_ = FlowGraph::Empty(); + Visit(stmt->body()); + + if (stmt->next() != NULL) Visit(stmt->next()); + + original.Loop(join, &condition, branch, &graph_); + graph_ = original; +} + + +void FlowGraphBuilder::VisitForInStatement(ForInStatement* stmt) { + Visit(stmt->enumerable()); + + JoinNode* join = new JoinNode(); + FlowGraph empty; + BranchNode* branch = new BranchNode(); + FlowGraph original = graph_; + graph_ = FlowGraph::Empty(); + Visit(stmt->body()); + + original.Loop(join, &empty, branch, &graph_); + graph_ = original; +} + + +void FlowGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) { + SetStackOverflow(); +} + + +void FlowGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) { + SetStackOverflow(); +} + + +void FlowGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) { + graph_.AppendInstruction(stmt); +} + + +void FlowGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) { + graph_.AppendInstruction(expr); +} + + +void 
FlowGraphBuilder::VisitFunctionBoilerplateLiteral( + FunctionBoilerplateLiteral* expr) { + graph_.AppendInstruction(expr); +} + + +void FlowGraphBuilder::VisitConditional(Conditional* expr) { + Visit(expr->condition()); + + BranchNode* branch = new BranchNode(); + FlowGraph original = graph_; + graph_ = FlowGraph::Empty(); + Visit(expr->then_expression()); + + FlowGraph left = graph_; + graph_ = FlowGraph::Empty(); + Visit(expr->else_expression()); + + JoinNode* join = new JoinNode(); + original.Split(branch, &left, &graph_, join); + graph_ = original; +} + + +void FlowGraphBuilder::VisitSlot(Slot* expr) { + UNREACHABLE(); +} + + +void FlowGraphBuilder::VisitVariableProxy(VariableProxy* expr) { + graph_.AppendInstruction(expr); +} + + +void FlowGraphBuilder::VisitLiteral(Literal* expr) { + graph_.AppendInstruction(expr); +} + + +void FlowGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) { + graph_.AppendInstruction(expr); +} + + +void FlowGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) { + ZoneList* properties = expr->properties(); + for (int i = 0, len = properties->length(); i < len; i++) { + Visit(properties->at(i)->value()); + } + graph_.AppendInstruction(expr); +} + + +void FlowGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { + ZoneList* values = expr->values(); + for (int i = 0, len = values->length(); i < len; i++) { + Visit(values->at(i)); + } + graph_.AppendInstruction(expr); +} + + +void FlowGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) { + graph_.AppendInstruction(expr); +} + + +void FlowGraphBuilder::VisitAssignment(Assignment* expr) { + Variable* var = expr->target()->AsVariableProxy()->AsVariable(); + Property* prop = expr->target()->AsProperty(); + // Left-hand side can be a variable or property (or reference error) but + // not both. 
+ ASSERT(var == NULL || prop == NULL); + if (var != NULL) { + Visit(expr->value()); + if (var->IsStackAllocated()) definitions_.Add(expr); + + } else if (prop != NULL) { + Visit(prop->obj()); + if (!prop->key()->IsPropertyName()) Visit(prop->key()); + Visit(expr->value()); + } + graph_.AppendInstruction(expr); +} + + +void FlowGraphBuilder::VisitThrow(Throw* expr) { + Visit(expr->exception()); + graph_.AppendInstruction(expr); +} + + +void FlowGraphBuilder::VisitProperty(Property* expr) { + Visit(expr->obj()); + if (!expr->key()->IsPropertyName()) Visit(expr->key()); + graph_.AppendInstruction(expr); +} + + +void FlowGraphBuilder::VisitCall(Call* expr) { + Visit(expr->expression()); + ZoneList* arguments = expr->arguments(); + for (int i = 0, len = arguments->length(); i < len; i++) { + Visit(arguments->at(i)); + } + graph_.AppendInstruction(expr); +} + + +void FlowGraphBuilder::VisitCallNew(CallNew* expr) { + Visit(expr->expression()); + ZoneList* arguments = expr->arguments(); + for (int i = 0, len = arguments->length(); i < len; i++) { + Visit(arguments->at(i)); + } + graph_.AppendInstruction(expr); +} + + +void FlowGraphBuilder::VisitCallRuntime(CallRuntime* expr) { + ZoneList* arguments = expr->arguments(); + for (int i = 0, len = arguments->length(); i < len; i++) { + Visit(arguments->at(i)); + } + graph_.AppendInstruction(expr); +} + + +void FlowGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) { + Visit(expr->expression()); + graph_.AppendInstruction(expr); +} + + +void FlowGraphBuilder::VisitCountOperation(CountOperation* expr) { + Visit(expr->expression()); + Variable* var = expr->expression()->AsVariableProxy()->AsVariable(); + if (var != NULL && var->IsStackAllocated()) { + definitions_.Add(expr); + } + graph_.AppendInstruction(expr); +} + + +void FlowGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) { + Visit(expr->left()); + + switch (expr->op()) { + case Token::COMMA: + Visit(expr->right()); + break; + + case Token::OR: { + BranchNode* branch = new BranchNode(); + FlowGraph original = graph_; + graph_ = FlowGraph::Empty(); + Visit(expr->right()); + FlowGraph empty; + JoinNode* join = new JoinNode(); + original.Split(branch, &empty, &graph_, join); + graph_ = original; + break; + } + + case Token::AND: { + BranchNode* branch = new BranchNode(); + FlowGraph original = graph_; + graph_ = FlowGraph::Empty(); + Visit(expr->right()); + FlowGraph empty; + JoinNode* join = new JoinNode(); + original.Split(branch, &graph_, &empty, join); + graph_ = original; + break; + } + + case Token::BIT_OR: + case Token::BIT_XOR: + case Token::BIT_AND: + case Token::SHL: + case Token::SAR: + case Token::SHR: + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: + case Token::MOD: + Visit(expr->right()); + graph_.AppendInstruction(expr); + break; + + default: + UNREACHABLE(); + } +} + + +void FlowGraphBuilder::VisitCompareOperation(CompareOperation* expr) { + Visit(expr->left()); + Visit(expr->right()); + graph_.AppendInstruction(expr); +} + + +void FlowGraphBuilder::VisitThisFunction(ThisFunction* expr) { + graph_.AppendInstruction(expr); +} + + void AstLabeler::Label(CompilationInfo* info) { info_ = info; VisitStatements(info_->function()->body()); @@ -204,6 +738,9 @@ void AstLabeler::VisitAssignment(Assignment* expr) { USE(proxy); ASSERT(proxy != NULL && proxy->var()->is_this()); info()->set_has_this_properties(true); + + prop->obj()->set_num(AstNode::kNoNumber); + prop->key()->set_num(AstNode::kNoNumber); Visit(expr->value()); expr->set_num(next_number_++); 
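VisitBinaryOperation gives && and || control-flow structure rather than a straight-line instruction: the right operand becomes one arm of a Split and the other arm is an empty graph, so for a && b the false edge of the branch bypasses b entirely, and symmetrically for ||. A tiny stand-alone illustration of the evaluation order that bypass edge models:

#include <cstdio>

static int calls = 0;
static bool RightOperand() { ++calls; return true; }

int main() {
  bool a = false;
  // The flow graph branches on 'a'; the false edge skips the right operand,
  // which is why the right operand is one arm of Split() and the other arm
  // is an empty graph.
  bool result = a && RightOperand();
  std::printf("result=%d right_operand_calls=%d\n", result, calls);  // 0 0
  return 0;
}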
} @@ -220,6 +757,9 @@ void AstLabeler::VisitProperty(Property* expr) { USE(proxy); ASSERT(proxy != NULL && proxy->var()->is_this()); info()->set_has_this_properties(true); + + expr->obj()->set_num(AstNode::kNoNumber); + expr->key()->set_num(AstNode::kNoNumber); expr->set_num(next_number_++); } @@ -558,4 +1098,370 @@ void LivenessAnalyzer::VisitThisFunction(ThisFunction* expr) { } +#ifdef DEBUG + +// Print a textual representation of an instruction in a flow graph. Using +// the AstVisitor is overkill because there is no recursion here. It is +// only used for printing in debug mode. +class TextInstructionPrinter: public AstVisitor { + public: + TextInstructionPrinter() {} + + private: + // AST node visit functions. +#define DECLARE_VISIT(type) virtual void Visit##type(type* node); + AST_NODE_LIST(DECLARE_VISIT) +#undef DECLARE_VISIT + + DISALLOW_COPY_AND_ASSIGN(TextInstructionPrinter); +}; + + +void TextInstructionPrinter::VisitDeclaration(Declaration* decl) { + UNREACHABLE(); +} + + +void TextInstructionPrinter::VisitBlock(Block* stmt) { + PrintF("Block"); +} + + +void TextInstructionPrinter::VisitExpressionStatement( + ExpressionStatement* stmt) { + PrintF("ExpressionStatement"); +} + + +void TextInstructionPrinter::VisitEmptyStatement(EmptyStatement* stmt) { + PrintF("EmptyStatement"); +} + + +void TextInstructionPrinter::VisitIfStatement(IfStatement* stmt) { + PrintF("IfStatement"); +} + + +void TextInstructionPrinter::VisitContinueStatement(ContinueStatement* stmt) { + UNREACHABLE(); +} + + +void TextInstructionPrinter::VisitBreakStatement(BreakStatement* stmt) { + UNREACHABLE(); +} + + +void TextInstructionPrinter::VisitReturnStatement(ReturnStatement* stmt) { + PrintF("return @%d", stmt->expression()->num()); +} + + +void TextInstructionPrinter::VisitWithEnterStatement(WithEnterStatement* stmt) { + PrintF("WithEnterStatement"); +} + + +void TextInstructionPrinter::VisitWithExitStatement(WithExitStatement* stmt) { + PrintF("WithExitStatement"); +} + + +void TextInstructionPrinter::VisitSwitchStatement(SwitchStatement* stmt) { + UNREACHABLE(); +} + + +void TextInstructionPrinter::VisitDoWhileStatement(DoWhileStatement* stmt) { + PrintF("DoWhileStatement"); +} + + +void TextInstructionPrinter::VisitWhileStatement(WhileStatement* stmt) { + PrintF("WhileStatement"); +} + + +void TextInstructionPrinter::VisitForStatement(ForStatement* stmt) { + PrintF("ForStatement"); +} + + +void TextInstructionPrinter::VisitForInStatement(ForInStatement* stmt) { + PrintF("ForInStatement"); +} + + +void TextInstructionPrinter::VisitTryCatchStatement(TryCatchStatement* stmt) { + UNREACHABLE(); +} + + +void TextInstructionPrinter::VisitTryFinallyStatement( + TryFinallyStatement* stmt) { + UNREACHABLE(); +} + + +void TextInstructionPrinter::VisitDebuggerStatement(DebuggerStatement* stmt) { + PrintF("DebuggerStatement"); +} + + +void TextInstructionPrinter::VisitFunctionLiteral(FunctionLiteral* expr) { + PrintF("FunctionLiteral"); +} + + +void TextInstructionPrinter::VisitFunctionBoilerplateLiteral( + FunctionBoilerplateLiteral* expr) { + PrintF("FunctionBoilerplateLiteral"); +} + + +void TextInstructionPrinter::VisitConditional(Conditional* expr) { + PrintF("Conditional"); +} + + +void TextInstructionPrinter::VisitSlot(Slot* expr) { + UNREACHABLE(); +} + + +void TextInstructionPrinter::VisitVariableProxy(VariableProxy* expr) { + Variable* var = expr->AsVariable(); + if (var != NULL) { + SmartPointer name = var->name()->ToCString(); + PrintF("%s", *name); + } else { + ASSERT(expr->AsProperty() != NULL); + 
VisitProperty(expr->AsProperty()); + } +} + + +void TextInstructionPrinter::VisitLiteral(Literal* expr) { + expr->handle()->ShortPrint(); +} + + +void TextInstructionPrinter::VisitRegExpLiteral(RegExpLiteral* expr) { + PrintF("RegExpLiteral"); +} + + +void TextInstructionPrinter::VisitObjectLiteral(ObjectLiteral* expr) { + PrintF("ObjectLiteral"); +} + + +void TextInstructionPrinter::VisitArrayLiteral(ArrayLiteral* expr) { + PrintF("ArrayLiteral"); +} + + +void TextInstructionPrinter::VisitCatchExtensionObject( + CatchExtensionObject* expr) { + PrintF("CatchExtensionObject"); +} + + +void TextInstructionPrinter::VisitAssignment(Assignment* expr) { + Variable* var = expr->target()->AsVariableProxy()->AsVariable(); + Property* prop = expr->target()->AsProperty(); + + if (var != NULL) { + SmartPointer name = var->name()->ToCString(); + PrintF("%s %s @%d", + *name, + Token::String(expr->op()), + expr->value()->num()); + } else if (prop != NULL) { + if (prop->key()->IsPropertyName()) { + PrintF("@%d.", prop->obj()->num()); + ASSERT(prop->key()->AsLiteral() != NULL); + prop->key()->AsLiteral()->handle()->Print(); + PrintF(" %s @%d", + Token::String(expr->op()), + expr->value()->num()); + } else { + PrintF("@%d[@%d] %s @%d", + prop->obj()->num(), + prop->key()->num(), + Token::String(expr->op()), + expr->value()->num()); + } + } else { + // Throw reference error. + Visit(expr->target()); + } +} + + +void TextInstructionPrinter::VisitThrow(Throw* expr) { + PrintF("throw @%d", expr->exception()->num()); +} + + +void TextInstructionPrinter::VisitProperty(Property* expr) { + if (expr->key()->IsPropertyName()) { + PrintF("@%d.", expr->obj()->num()); + ASSERT(expr->key()->AsLiteral() != NULL); + expr->key()->AsLiteral()->handle()->Print(); + } else { + PrintF("@%d[@%d]", expr->obj()->num(), expr->key()->num()); + } +} + + +void TextInstructionPrinter::VisitCall(Call* expr) { + PrintF("@%d(", expr->expression()->num()); + ZoneList* arguments = expr->arguments(); + for (int i = 0, len = arguments->length(); i < len; i++) { + if (i != 0) PrintF(", "); + PrintF("@%d", arguments->at(i)->num()); + } + PrintF(")"); +} + + +void TextInstructionPrinter::VisitCallNew(CallNew* expr) { + PrintF("new @%d(", expr->expression()->num()); + ZoneList* arguments = expr->arguments(); + for (int i = 0, len = arguments->length(); i < len; i++) { + if (i != 0) PrintF(", "); + PrintF("@%d", arguments->at(i)->num()); + } + PrintF(")"); +} + + +void TextInstructionPrinter::VisitCallRuntime(CallRuntime* expr) { + SmartPointer name = expr->name()->ToCString(); + PrintF("%s(", *name); + ZoneList* arguments = expr->arguments(); + for (int i = 0, len = arguments->length(); i < len; i++) { + if (i != 0) PrintF(", "); + PrintF("@%d", arguments->at(i)->num()); + } + PrintF(")"); +} + + +void TextInstructionPrinter::VisitUnaryOperation(UnaryOperation* expr) { + PrintF("%s(@%d)", Token::String(expr->op()), expr->expression()->num()); +} + + +void TextInstructionPrinter::VisitCountOperation(CountOperation* expr) { + if (expr->is_prefix()) { + PrintF("%s@%d", Token::String(expr->op()), expr->expression()->num()); + } else { + PrintF("@%d%s", expr->expression()->num(), Token::String(expr->op())); + } +} + + +void TextInstructionPrinter::VisitBinaryOperation(BinaryOperation* expr) { + ASSERT(expr->op() != Token::COMMA); + ASSERT(expr->op() != Token::OR); + ASSERT(expr->op() != Token::AND); + PrintF("@%d %s @%d", + expr->left()->num(), + Token::String(expr->op()), + expr->right()->num()); +} + + +void 
TextInstructionPrinter::VisitCompareOperation(CompareOperation* expr) { + PrintF("@%d %s @%d", + expr->left()->num(), + Token::String(expr->op()), + expr->right()->num()); +} + + +void TextInstructionPrinter::VisitThisFunction(ThisFunction* expr) { + PrintF("ThisFunction"); +} + + +static int node_count = 0; +static int instruction_count = 0; + + +void Node::AssignNumbers() { + set_number(node_count++); +} + + +void BlockNode::AssignNumbers() { + set_number(node_count++); + for (int i = 0, len = instructions_.length(); i < len; i++) { + instructions_[i]->set_num(instruction_count++); + } +} + + +void EntryNode::PrintText() { + PrintF("L%d: Entry\n", number()); + PrintF("goto L%d\n\n", successor_->number()); +} + +void ExitNode::PrintText() { + PrintF("L%d: Exit\n\n", number()); +} + + +void BlockNode::PrintText() { + // Print the instructions in the block. + PrintF("L%d: Block\n", number()); + TextInstructionPrinter printer; + for (int i = 0, len = instructions_.length(); i < len; i++) { + PrintF("%d ", instructions_[i]->num()); + printer.Visit(instructions_[i]); + PrintF("\n"); + } + PrintF("goto L%d\n\n", successor_->number()); +} + + +void BranchNode::PrintText() { + PrintF("L%d: Branch\n", number()); + PrintF("goto (L%d, L%d)\n\n", successor0_->number(), successor1_->number()); +} + + +void JoinNode::PrintText() { + PrintF("L%d: Join(", number()); + for (int i = 0, len = predecessors_.length(); i < len; i++) { + if (i != 0) PrintF(", "); + PrintF("L%d", predecessors_[i]->number()); + } + PrintF(")\ngoto L%d\n\n", successor_->number()); +} + + +void FlowGraph::PrintText(ZoneList* postorder) { + PrintF("\n========\n"); + + // Number nodes and instructions in reverse postorder. + node_count = 0; + instruction_count = 0; + for (int i = postorder->length() - 1; i >= 0; i--) { + postorder->at(i)->AssignNumbers(); + } + + // Print basic blocks in reverse postorder. 
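For orientation, a hedged sketch of how this debug printer would be driven: the FlowGraphBuilder declared in data-flow.h (later in this patch) exposes graph() and postorder(), which is all FlowGraph::PrintText needs. The helper name DumpFlowGraph and the sample listing in the comments are illustrative, and running it assumes an active compilation Zone, as elsewhere in the compiler.

#ifdef DEBUG
// Editorial sketch: dump the flow graph built for a function literal.
static void DumpFlowGraph(FunctionLiteral* lit) {
  FlowGraphBuilder builder;
  builder.Build(lit);
  // Produces a listing along these lines for `x = x + 1`:
  //   L0: Entry
  //   goto L1
  //
  //   L1: Block
  //   0 x
  //   1 1
  //   2 @0 + @1
  //   3 x = @2
  //   goto L2
  //
  //   L2: Exit
  builder.graph()->PrintText(builder.postorder());
}
#endif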
+ for (int i = postorder->length() - 1; i >= 0; i--) { + postorder->at(i)->PrintText(); + } +} + + +#endif // defined(DEBUG) + + } } // namespace v8::internal diff --git a/deps/v8/src/data-flow.h b/deps/v8/src/data-flow.h index 2331944..2dc2d73 100644 --- a/deps/v8/src/data-flow.h +++ b/deps/v8/src/data-flow.h @@ -28,12 +28,403 @@ #ifndef V8_DATAFLOW_H_ #define V8_DATAFLOW_H_ +#include "v8.h" + #include "ast.h" #include "compiler.h" +#include "zone-inl.h" namespace v8 { namespace internal { +class BitVector: public ZoneObject { + public: + explicit BitVector(int length) + : length_(length), + data_length_(SizeFor(length)), + data_(Zone::NewArray(data_length_)) { + ASSERT(length > 0); + Clear(); + } + + BitVector(const BitVector& other) + : length_(other.length()), + data_length_(SizeFor(length_)), + data_(Zone::NewArray(data_length_)) { + CopyFrom(other); + } + + static int SizeFor(int length) { + return 1 + ((length - 1) / 32); + } + + BitVector& operator=(const BitVector& rhs) { + if (this != &rhs) CopyFrom(rhs); + return *this; + } + + void CopyFrom(const BitVector& other) { + ASSERT(other.length() == length()); + for (int i = 0; i < data_length_; i++) { + data_[i] = other.data_[i]; + } + } + + bool Contains(int i) { + ASSERT(i >= 0 && i < length()); + uint32_t block = data_[i / 32]; + return (block & (1U << (i % 32))) != 0; + } + + void Add(int i) { + ASSERT(i >= 0 && i < length()); + data_[i / 32] |= (1U << (i % 32)); + } + + void Remove(int i) { + ASSERT(i >= 0 && i < length()); + data_[i / 32] &= ~(1U << (i % 32)); + } + + void Union(const BitVector& other) { + ASSERT(other.length() == length()); + for (int i = 0; i < data_length_; i++) { + data_[i] |= other.data_[i]; + } + } + + void Intersect(const BitVector& other) { + ASSERT(other.length() == length()); + for (int i = 0; i < data_length_; i++) { + data_[i] &= other.data_[i]; + } + } + + void Clear() { + for (int i = 0; i < data_length_; i++) { + data_[i] = 0; + } + } + + bool IsEmpty() const { + for (int i = 0; i < data_length_; i++) { + if (data_[i] != 0) return false; + } + return true; + } + + int length() const { return length_; } + + private: + int length_; + int data_length_; + uint32_t* data_; +}; + + +// Forward declarations of Node types. +class Node; +class BranchNode; +class JoinNode; + +// Flow graphs have a single entry and single exit. The empty flowgraph is +// represented by both entry and exit being NULL. +class FlowGraph BASE_EMBEDDED { + public: + FlowGraph() : entry_(NULL), exit_(NULL) {} + + static FlowGraph Empty() { return FlowGraph(); } + + bool is_empty() const { return entry_ == NULL; } + Node* entry() const { return entry_; } + Node* exit() const { return exit_; } + + // Add a single instruction to the end of this flowgraph. + void AppendInstruction(AstNode* instruction); + + // Add a single node to the end of this flow graph. + void AppendNode(Node* node); + + // Add a flow graph fragment to the end of this one. + void AppendGraph(FlowGraph* graph); + + // Concatenate an if-then-else flow-graph to this one. Control is split + // and merged, so the graph remains single-entry, single-exit. + void Split(BranchNode* branch, + FlowGraph* left, + FlowGraph* right, + JoinNode* merge); + + // Concatenate a forward loop (e.g., while or for loop) flow-graph to this + // one. Control is split by the condition and merged back from the back + // edge at end of the body to the beginning of the condition. The single + // (free) exit of the result graph is the right (false) arm of the branch + // node. 
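A small usage sketch for the BitVector defined above (editorial, not part of the patch). BitVector allocates its backing store from the current Zone, so this assumes an active ZoneScope, as in the rest of the compiler; the function name is made up for illustration.

static bool BitVectorExample() {
  BitVector a(64);   // 64-bit set, backed by two zone-allocated 32-bit words
  BitVector b(64);

  a.Add(3);
  a.Add(17);
  b.Add(17);

  a.Intersect(b);    // a is now {17}
  bool ok = a.Contains(17) && !a.Contains(3);

  a.Union(b);        // still {17}
  a.Remove(17);
  return ok && a.IsEmpty();
}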
+ void Loop(JoinNode* merge, + FlowGraph* condition, + BranchNode* branch, + FlowGraph* body); + +#ifdef DEBUG + void PrintText(ZoneList* postorder); +#endif + + private: + Node* entry_; + Node* exit_; +}; + + +// Flow-graph nodes. +class Node: public ZoneObject { + public: + Node() : number_(-1), mark_(false) {} + + virtual ~Node() {} + + virtual bool IsBlockNode() { return false; } + virtual bool IsJoinNode() { return false; } + + virtual void AddPredecessor(Node* predecessor) = 0; + virtual void AddSuccessor(Node* successor) = 0; + + bool IsMarkedWith(bool mark) { return mark_ == mark; } + void MarkWith(bool mark) { mark_ = mark; } + + // Perform a depth first search and record preorder and postorder + // traversal orders. + virtual void Traverse(bool mark, + ZoneList* preorder, + ZoneList* postorder) = 0; + + int number() { return number_; } + void set_number(int number) { number_ = number; } + +#ifdef DEBUG + virtual void AssignNumbers(); + virtual void PrintText() = 0; +#endif + + private: + int number_; + bool mark_; + + DISALLOW_COPY_AND_ASSIGN(Node); +}; + + +// An entry node has no predecessors and a single successor. +class EntryNode: public Node { + public: + EntryNode() : successor_(NULL) {} + + void AddPredecessor(Node* predecessor) { UNREACHABLE(); } + + void AddSuccessor(Node* successor) { + ASSERT(successor_ == NULL && successor != NULL); + successor_ = successor; + } + + void Traverse(bool mark, + ZoneList* preorder, + ZoneList* postorder); + +#ifdef DEBUG + void PrintText(); +#endif + + private: + Node* successor_; + + DISALLOW_COPY_AND_ASSIGN(EntryNode); +}; + + +// An exit node has a arbitrarily many predecessors and no successors. +class ExitNode: public Node { + public: + ExitNode() : predecessors_(4) {} + + void AddPredecessor(Node* predecessor) { + ASSERT(predecessor != NULL); + predecessors_.Add(predecessor); + } + + void AddSuccessor(Node* successor) { /* Do nothing. */ } + + void Traverse(bool mark, + ZoneList* preorder, + ZoneList* postorder); + +#ifdef DEBUG + void PrintText(); +#endif + + private: + ZoneList predecessors_; + + DISALLOW_COPY_AND_ASSIGN(ExitNode); +}; + + +// Block nodes have a single successor and predecessor and a list of +// instructions. +class BlockNode: public Node { + public: + BlockNode() : predecessor_(NULL), successor_(NULL), instructions_(4) {} + + static BlockNode* cast(Node* node) { + ASSERT(node->IsBlockNode()); + return reinterpret_cast(node); + } + + bool IsBlockNode() { return true; } + + void AddPredecessor(Node* predecessor) { + ASSERT(predecessor_ == NULL && predecessor != NULL); + predecessor_ = predecessor; + } + + void AddSuccessor(Node* successor) { + ASSERT(successor_ == NULL && successor != NULL); + successor_ = successor; + } + + void AddInstruction(AstNode* instruction) { + instructions_.Add(instruction); + } + + void Traverse(bool mark, + ZoneList* preorder, + ZoneList* postorder); + +#ifdef DEBUG + void AssignNumbers(); + void PrintText(); +#endif + + private: + Node* predecessor_; + Node* successor_; + ZoneList instructions_; + + DISALLOW_COPY_AND_ASSIGN(BlockNode); +}; + + +// Branch nodes have a single predecessor and a pair of successors. 
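How these node classes fit together is easiest to see from a hand-wired single-block graph (editorial sketch, not part of the patch). The node classes are ZoneObjects, so an active Zone is assumed, and this wiring is presumably what FlowGraph::AppendInstruction/AppendNode do internally; the helper name is made up.

static void WireTrivialGraph(AstNode* instruction) {
  EntryNode* entry = new EntryNode();
  BlockNode* block = new BlockNode();
  ExitNode* exit = new ExitNode();

  // Edges are recorded on both ends, respecting each node's arity asserts:
  // Entry has one successor, Block one predecessor and one successor, Exit
  // arbitrarily many predecessors.
  entry->AddSuccessor(block);
  block->AddPredecessor(entry);

  block->AddInstruction(instruction);

  block->AddSuccessor(exit);
  exit->AddPredecessor(block);
}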
+class BranchNode: public Node { + public: + BranchNode() : predecessor_(NULL), successor0_(NULL), successor1_(NULL) {} + + void AddPredecessor(Node* predecessor) { + ASSERT(predecessor_ == NULL && predecessor != NULL); + predecessor_ = predecessor; + } + + void AddSuccessor(Node* successor) { + ASSERT(successor1_ == NULL && successor != NULL); + if (successor0_ == NULL) { + successor0_ = successor; + } else { + successor1_ = successor; + } + } + + void Traverse(bool mark, + ZoneList* preorder, + ZoneList* postorder); + +#ifdef DEBUG + void PrintText(); +#endif + + private: + Node* predecessor_; + Node* successor0_; + Node* successor1_; + + DISALLOW_COPY_AND_ASSIGN(BranchNode); +}; + + +// Join nodes have arbitrarily many predecessors and a single successor. +class JoinNode: public Node { + public: + JoinNode() : predecessors_(2), successor_(NULL) {} + + static JoinNode* cast(Node* node) { + ASSERT(node->IsJoinNode()); + return reinterpret_cast(node); + } + + bool IsJoinNode() { return true; } + + void AddPredecessor(Node* predecessor) { + ASSERT(predecessor != NULL); + predecessors_.Add(predecessor); + } + + void AddSuccessor(Node* successor) { + ASSERT(successor_ == NULL && successor != NULL); + successor_ = successor; + } + + void Traverse(bool mark, + ZoneList* preorder, + ZoneList* postorder); + +#ifdef DEBUG + void PrintText(); +#endif + + private: + ZoneList predecessors_; + Node* successor_; + + DISALLOW_COPY_AND_ASSIGN(JoinNode); +}; + + +// Construct a flow graph from a function literal. Build pre- and postorder +// traversal orders as a byproduct. +class FlowGraphBuilder: public AstVisitor { + public: + FlowGraphBuilder() + : global_exit_(NULL), + preorder_(4), + postorder_(4), + definitions_(4) { + } + + void Build(FunctionLiteral* lit); + + FlowGraph* graph() { return &graph_; } + + ZoneList* postorder() { return &postorder_; } + + private: + ExitNode* global_exit() { return global_exit_; } + + // AST node visit functions. +#define DECLARE_VISIT(type) virtual void Visit##type(type* node); + AST_NODE_LIST(DECLARE_VISIT) +#undef DECLARE_VISIT + + FlowGraph graph_; + ExitNode* global_exit_; + ZoneList preorder_; + ZoneList postorder_; + + // The flow graph builder collects a list of definitions (assignments and + // count operations) to stack-allocated variables to use for reaching + // definitions analysis. + ZoneList definitions_; + + DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder); +}; + + // This class is used to number all expressions in the AST according to // their evaluation order (post-order left-to-right traversal). class AstLabeler: public AstVisitor { diff --git a/deps/v8/src/date-delay.js b/deps/v8/src/date-delay.js index 7d8f458..c0180c2 100644 --- a/deps/v8/src/date-delay.js +++ b/deps/v8/src/date-delay.js @@ -113,8 +113,11 @@ function EquivalentTime(t) { // we must do this, but for compatibility with other browsers, we use // the actual year if it is in the range 1970..2037 if (t >= 0 && t <= 2.1e12) return t; - var day = MakeDay(EquivalentYear(YEAR_FROM_TIME(t)), MONTH_FROM_TIME(t), DATE_FROM_TIME(t)); - return TimeClip(MakeDate(day, TimeWithinDay(t))); + + var day = MakeDay(EquivalentYear(YEAR_FROM_TIME(t)), + MONTH_FROM_TIME(t), + DATE_FROM_TIME(t)); + return MakeDate(day, TimeWithinDay(t)); } @@ -257,14 +260,6 @@ function TimeInYear(year) { } -// Compute modified Julian day from year, month, date. -function ToJulianDay(year, month, date) { - var jy = (month > 1) ? year : year - 1; - var jm = (month > 1) ? 
month + 2 : month + 14; - var ja = FLOOR(jy / 100); - return FLOOR(FLOOR(365.25*jy) + FLOOR(30.6001*jm) + date + 1720995) + 2 - ja + FLOOR(0.25*ja); -} - var four_year_cycle_table = CalculateDateTable(); @@ -359,20 +354,18 @@ function FromJulianDay(julian) { function MakeDay(year, month, date) { if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return $NaN; - // Conversion to integers. year = TO_INTEGER(year); month = TO_INTEGER(month); date = TO_INTEGER(date); - // Overflow months into year. - year = year + FLOOR(month/12); - month = month % 12; - if (month < 0) { - month += 12; + if (year < kMinYear || year > kMaxYear || + month < kMinMonth || month > kMaxMonth || + date < kMinDate || date > kMaxDate) { + return $NaN; } - // Return days relative to Jan 1 1970. - return ToJulianDay(year, month, date) - kDayZeroInJulianDay; + // Now we rely on year, month and date being SMIs. + return %DateMakeDay(year, month, date); } diff --git a/deps/v8/src/debug-delay.js b/deps/v8/src/debug-delay.js index 55c25a9..5ba5a3b 100644 --- a/deps/v8/src/debug-delay.js +++ b/deps/v8/src/debug-delay.js @@ -1251,7 +1251,9 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request) } else if (request.command == 'version') { this.versionRequest_(request, response); } else if (request.command == 'profile') { - this.profileRequest_(request, response); + this.profileRequest_(request, response); + } else if (request.command == 'changelive') { + this.changeLiveRequest_(request, response); } else { throw new Error('Unknown command "' + request.command + '" in request'); } @@ -1954,6 +1956,52 @@ DebugCommandProcessor.prototype.profileRequest_ = function(request, response) { }; +DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response) { + if (!Debug.LiveEditChangeScript) { + return response.failed('LiveEdit feature is not supported'); + } + if (!request.arguments) { + return response.failed('Missing arguments'); + } + var script_id = request.arguments.script_id; + var change_pos = parseInt(request.arguments.change_pos); + var change_len = parseInt(request.arguments.change_len); + var new_string = request.arguments.new_string; + if (!IS_STRING(new_string)) { + response.failed('Argument "new_string" is not a string value'); + return; + } + + var scripts = %DebugGetLoadedScripts(); + + var the_script = null; + for (var i = 0; i < scripts.length; i++) { + if (scripts[i].id == script_id) { + the_script = scripts[i]; + } + } + if (!the_script) { + response.failed('Script not found'); + return; + } + + var change_log = new Array(); + try { + Debug.LiveEditChangeScript(the_script, change_pos, change_len, new_string, + change_log); + } catch (e) { + if (e instanceof Debug.LiveEditChangeScript.Failure) { + // Let's treat it as a "success" so that body with change_log will be + // sent back. "change_log" will have "failure" field set. + change_log.push( { failure: true } ); + } else { + throw e; + } + } + response.body = {change_log: change_log}; +}; + + // Check whether the previously processed command caused the VM to become // running. 
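For reference, a hedged example of the text a debugger front end might send for the new "changelive" command handled above. The argument names come straight from the handler; the seq/type envelope follows the existing V8 debugger JSON protocol, and the concrete values (script id 17, splice at offset 120, and so on) are invented for illustration.

// Illustrative request body only; the transport framing used by the debug
// agent (Content-Length header plus blank line) is not shown.
static const char kChangeLiveRequest[] =
    "{\"seq\": 42,"
    " \"type\": \"request\","
    " \"command\": \"changelive\","
    " \"arguments\": {\"script_id\": 17,"
    "                 \"change_pos\": 120,"
    "                 \"change_len\": 4,"
    "                 \"new_string\": \"bar\"}}";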
DebugCommandProcessor.prototype.isRunning = function() { diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index 8c4f51d..959bea1 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -39,6 +39,7 @@ #include "global-handles.h" #include "ic.h" #include "ic-inl.h" +#include "messages.h" #include "natives.h" #include "stub-cache.h" #include "log.h" @@ -123,7 +124,9 @@ void BreakLocationIterator::Next() { if (RelocInfo::IsCodeTarget(rmode())) { Address target = original_rinfo()->target_address(); Code* code = Code::GetCodeFromTargetAddress(target); - if (code->is_inline_cache_stub() || RelocInfo::IsConstructCall(rmode())) { + if ((code->is_inline_cache_stub() && + code->kind() != Code::BINARY_OP_IC) || + RelocInfo::IsConstructCall(rmode())) { break_point_++; return; } @@ -755,6 +758,12 @@ bool Debug::Load() { bool caught_exception = !CompileDebuggerScript(Natives::GetIndex("mirror")) || !CompileDebuggerScript(Natives::GetIndex("debug")); + + if (FLAG_enable_liveedit) { + caught_exception = caught_exception || + !CompileDebuggerScript(Natives::GetIndex("liveedit")); + } + Debugger::set_compiling_natives(false); // Make sure we mark the debugger as not loading before we might @@ -1337,24 +1346,26 @@ Handle Debug::FindDebugBreak(Handle code, RelocInfo::Mode mode) { // Find the builtin debug break function matching the calling convention // used by the call site. if (code->is_inline_cache_stub()) { - if (code->is_call_stub()) { - return ComputeCallDebugBreak(code->arguments_count()); - } - if (code->is_load_stub()) { - return Handle(Builtins::builtin(Builtins::LoadIC_DebugBreak)); - } - if (code->is_store_stub()) { - return Handle(Builtins::builtin(Builtins::StoreIC_DebugBreak)); - } - if (code->is_keyed_load_stub()) { - Handle result = - Handle(Builtins::builtin(Builtins::KeyedLoadIC_DebugBreak)); - return result; - } - if (code->is_keyed_store_stub()) { - Handle result = - Handle(Builtins::builtin(Builtins::KeyedStoreIC_DebugBreak)); - return result; + switch (code->kind()) { + case Code::CALL_IC: + return ComputeCallDebugBreak(code->arguments_count()); + + case Code::LOAD_IC: + return Handle(Builtins::builtin(Builtins::LoadIC_DebugBreak)); + + case Code::STORE_IC: + return Handle(Builtins::builtin(Builtins::StoreIC_DebugBreak)); + + case Code::KEYED_LOAD_IC: + return Handle( + Builtins::builtin(Builtins::KeyedLoadIC_DebugBreak)); + + case Code::KEYED_STORE_IC: + return Handle( + Builtins::builtin(Builtins::KeyedStoreIC_DebugBreak)); + + default: + UNREACHABLE(); } } if (RelocInfo::IsConstructCall(mode)) { @@ -1959,7 +1970,8 @@ void Debugger::OnBeforeCompile(Handle