From cbdf3393a21690178822c2d6ce2513270d70a02e Mon Sep 17 00:00:00 2001
From: isaacs
Date: Tue, 29 May 2012 15:05:24 -0700
Subject: [PATCH] Upgrade v8 to 3.11.7

---
 deps/v8/ChangeLog | 62 +++
 deps/v8/Makefile | 1 +
 deps/v8/SConstruct | 4 +-
 deps/v8/build/common.gypi | 285 ++++++------
 deps/v8/include/v8.h | 8 +-
 deps/v8/src/SConscript | 1 +
 deps/v8/src/api.cc | 31 +-
 deps/v8/src/api.h | 4 +-
 deps/v8/src/arm/builtins-arm.cc | 9 +-
 deps/v8/src/arm/code-stubs-arm.cc | 35 +-
 deps/v8/src/arm/codegen-arm.cc | 4 +-
 deps/v8/src/arm/full-codegen-arm.cc | 105 +---
 deps/v8/src/arm/ic-arm.cc | 14 +-
 deps/v8/src/arm/lithium-arm.cc | 5 +-
 deps/v8/src/arm/lithium-arm.h | 9 +-
 deps/v8/src/arm/lithium-codegen-arm.cc | 104 +++--
 deps/v8/src/arm/macro-assembler-arm.cc | 86 ++--
 deps/v8/src/arm/macro-assembler-arm.h | 9 +-
 deps/v8/src/arm/regexp-macro-assembler-arm.cc | 170 +++++---
 deps/v8/src/arm/regexp-macro-assembler-arm.h | 13 +-
 deps/v8/src/arm/simulator-arm.h | 12 +-
 deps/v8/src/arm/stub-cache-arm.cc | 50 ++-
 deps/v8/src/array.js | 10 +-
 deps/v8/src/bootstrapper.cc | 20 +-
 deps/v8/src/builtins.cc | 99 +++--
 deps/v8/src/code-stubs.cc | 32 +-
 deps/v8/src/codegen.h | 6 +-
 deps/v8/src/compiler.cc | 2 +-
 deps/v8/src/contexts.h | 20 +-
 deps/v8/src/d8.cc | 22 +-
 deps/v8/src/d8.h | 1 -
 deps/v8/src/debug-agent.cc | 32 +-
 deps/v8/src/elements-kind.cc | 134 ++++++
 deps/v8/src/elements-kind.h | 210 +++++++++
 deps/v8/src/elements.cc | 370 +++++++++++-----
 deps/v8/src/elements.h | 5 +
 deps/v8/src/factory.cc | 5 +-
 deps/v8/src/factory.h | 17 +-
 deps/v8/src/flag-definitions.h | 3 +
 deps/v8/src/full-codegen.cc | 3 +-
 deps/v8/src/func-name-inferrer.h | 2 +
 deps/v8/src/heap-inl.h | 20 +-
 deps/v8/src/heap.cc | 47 +-
 deps/v8/src/heap.h | 2 +-
 deps/v8/src/hydrogen-instructions.cc | 23 +-
 deps/v8/src/hydrogen-instructions.h | 182 ++++++--
 deps/v8/src/hydrogen.cc | 213 ++++++---
 deps/v8/src/hydrogen.h | 2 +
 deps/v8/src/ia32/builtins-ia32.cc | 9 +-
 deps/v8/src/ia32/code-stubs-ia32.cc | 38 +-
 deps/v8/src/ia32/codegen-ia32.cc | 4 +-
 deps/v8/src/ia32/debug-ia32.cc | 2 +-
 deps/v8/src/ia32/full-codegen-ia32.cc | 109 +---
 deps/v8/src/ia32/ic-ia32.cc | 16 +-
 deps/v8/src/ia32/lithium-codegen-ia32.cc | 123 +++---
 deps/v8/src/ia32/lithium-codegen-ia32.h | 3 +-
 deps/v8/src/ia32/lithium-ia32.cc | 8 +-
 deps/v8/src/ia32/lithium-ia32.h | 12 +-
 deps/v8/src/ia32/macro-assembler-ia32.cc | 88 ++--
 deps/v8/src/ia32/macro-assembler-ia32.h | 9 +-
 deps/v8/src/ia32/regexp-macro-assembler-ia32.cc | 156 +++++--
 deps/v8/src/ia32/regexp-macro-assembler-ia32.h | 13 +-
 deps/v8/src/ia32/simulator-ia32.h | 8 +-
 deps/v8/src/ia32/stub-cache-ia32.cc | 27 +-
 deps/v8/src/ic.cc | 65 ++-
 deps/v8/src/ic.h | 8 +-
 deps/v8/src/incremental-marking-inl.h | 24 +-
 deps/v8/src/incremental-marking.cc | 41 +-
 deps/v8/src/incremental-marking.h | 15 +-
 deps/v8/src/isolate.h | 2 +-
 deps/v8/src/jsregexp.cc | 28 +-
 deps/v8/src/jsregexp.h | 26 +-
 deps/v8/src/lithium.cc | 7 +-
 deps/v8/src/mark-compact-inl.h | 28 +-
 deps/v8/src/mark-compact.cc | 158 +++----
 deps/v8/src/mark-compact.h | 57 ++-
 deps/v8/src/messages.js | 183 ++++----
 deps/v8/src/mips/builtins-mips.cc | 9 +-
 deps/v8/src/mips/code-stubs-mips.cc | 38 +-
 deps/v8/src/mips/codegen-mips.cc | 4 +-
 deps/v8/src/mips/full-codegen-mips.cc | 106 +---
 deps/v8/src/mips/ic-mips.cc | 23 +-
 deps/v8/src/mips/lithium-codegen-mips.cc | 114 +++--
 deps/v8/src/mips/lithium-mips.cc | 5 +-
 deps/v8/src/mips/lithium-mips.h | 10 +-
 deps/v8/src/mips/macro-assembler-mips.cc | 87 ++--
 deps/v8/src/mips/macro-assembler-mips.h | 9 +-
 deps/v8/src/mips/regexp-macro-assembler-mips.cc | 160 ++---
 deps/v8/src/mips/regexp-macro-assembler-mips.h | 11 +-
 deps/v8/src/mips/simulator-mips.h | 10 +-
 deps/v8/src/mips/stub-cache-mips.cc | 51 ++-
 deps/v8/src/objects-debug.cc | 52 ++-
 deps/v8/src/objects-inl.h | 247 ++++++-----
 deps/v8/src/objects-printer.cc | 5 +-
 deps/v8/src/objects.cc | 484 ++++++++++++---------
 deps/v8/src/objects.h | 119 +++--
 deps/v8/src/parser.cc | 20 +-
 deps/v8/src/platform-posix.cc | 33 +-
 deps/v8/src/platform-win32.cc | 18 +-
 deps/v8/src/platform.h | 3 +-
 deps/v8/src/profile-generator-inl.h | 20 -
 deps/v8/src/profile-generator.cc | 259 +----------
 deps/v8/src/profile-generator.h | 46 +-
 deps/v8/src/regexp-macro-assembler-irregexp.cc | 3 +-
 deps/v8/src/regexp-macro-assembler-irregexp.h | 4 +-
 deps/v8/src/regexp-macro-assembler-tracer.cc | 11 +-
 deps/v8/src/regexp-macro-assembler-tracer.h | 2 +-
 deps/v8/src/regexp-macro-assembler.cc | 10 +-
 deps/v8/src/regexp-macro-assembler.h | 12 +-
 deps/v8/src/regexp.js | 6 +
 deps/v8/src/runtime.cc | 464 +++++++++++---------
 deps/v8/src/runtime.h | 10 +-
 deps/v8/src/scopes.cc | 20 +
 deps/v8/src/scopes.h | 9 +-
 deps/v8/src/string-stream.cc | 4 +-
 deps/v8/src/v8-counters.h | 2 -
 deps/v8/src/version.cc | 2 +-
 deps/v8/src/x64/builtins-x64.cc | 9 +-
 deps/v8/src/x64/code-stubs-x64.cc | 41 +-
 deps/v8/src/x64/codegen-x64.cc | 4 +-
 deps/v8/src/x64/debug-x64.cc | 13 +-
 deps/v8/src/x64/disasm-x64.cc | 2 +-
 deps/v8/src/x64/full-codegen-x64.cc | 116 +---
 deps/v8/src/x64/ic-x64.cc | 14 +-
 deps/v8/src/x64/lithium-codegen-x64.cc | 170 +++++---
 deps/v8/src/x64/lithium-codegen-x64.h | 3 +-
 deps/v8/src/x64/lithium-x64.cc | 5 +-
 deps/v8/src/x64/lithium-x64.h | 9 +-
 deps/v8/src/x64/macro-assembler-x64.cc | 88 ++--
 deps/v8/src/x64/macro-assembler-x64.h | 9 +-
 deps/v8/src/x64/regexp-macro-assembler-x64.cc | 173 +++++---
 deps/v8/src/x64/regexp-macro-assembler-x64.h | 25 +-
 deps/v8/src/x64/simulator-x64.h | 8 +-
 deps/v8/src/x64/stub-cache-x64.cc | 41 +-
 deps/v8/test/cctest/cctest.status | 1 +
 deps/v8/test/cctest/test-func-name-inference.cc | 38 ++
 deps/v8/test/cctest/test-heap-profiler.cc | 92 +---
 deps/v8/test/cctest/test-heap.cc | 65 ++-
 deps/v8/test/cctest/test-mark-compact.cc | 10 +-
 deps/v8/test/cctest/test-regexp.cc | 22 +-
 deps/v8/test/mjsunit/accessor-map-sharing.js | 2 +-
 deps/v8/test/mjsunit/array-construct-transition.js | 6 +-
 deps/v8/test/mjsunit/array-literal-transitions.js | 20 +-
 deps/v8/test/mjsunit/elements-kind.js | 8 +-
 .../test/mjsunit/elements-transition-hoisting.js | 4 +-
 deps/v8/test/mjsunit/elements-transition.js | 10 +-
 deps/v8/test/mjsunit/packed-elements.js | 112 +++++
 deps/v8/test/mjsunit/regexp-global.js | 132 ++++++
 deps/v8/test/mjsunit/regexp.js | 11 +
 deps/v8/test/mjsunit/regress/regress-117409.js | 2 +-
 deps/v8/test/mjsunit/regress/regress-128018.js | 35 ++
 deps/v8/test/mjsunit/regress/regress-128146.js | 33 ++
 deps/v8/test/mjsunit/regress/regress-1849.js | 6 +-
 deps/v8/test/mjsunit/regress/regress-1878.js | 4 +-
 deps/v8/test/mjsunit/regress/regress-2071.js | 79 ++++
 deps/v8/test/mjsunit/regress/regress-2153.js | 32 ++
 .../test/mjsunit/regress/regress-crbug-122271.js | 8 +-
 .../mjsunit/regress/regress-smi-only-concat.js | 4 +-
 .../test/mjsunit/regress/regress-transcendental.js | 49 +++
 deps/v8/test/mjsunit/stack-traces.js | 14 +
 deps/v8/test/mjsunit/unbox-double-arrays.js | 7 +-
 deps/v8/tools/fuzz-harness.sh | 92 ++++
 deps/v8/tools/grokdump.py | 207 ++++---
 deps/v8/tools/gyp/v8.gyp | 60 +--
 deps/v8/tools/js2c.py | 6 +-
 deps/v8/tools/jsmin.py | 4 +-
 deps/v8/tools/test-wrapper-gypbuild.py | 15 +-
 167 files changed, 5054 insertions(+), 3138 deletions(-)
 create mode 100644 deps/v8/src/elements-kind.cc
 create mode 100644 deps/v8/src/elements-kind.h
 create mode 100644 deps/v8/test/mjsunit/packed-elements.js
 create mode 100644 deps/v8/test/mjsunit/regexp-global.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-128018.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-128146.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-2071.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-2153.js
 create mode 100644 deps/v8/test/mjsunit/regress/regress-transcendental.js
 create mode 100644 deps/v8/tools/fuzz-harness.sh

diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index c52a5ab..f64809d 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,65 @@
+2012-05-29: Version 3.11.7
+
+        Get better function names in stack traces.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-05-24: Version 3.11.6
+
+        Fixed RegExp.prototype.toString for incompatible receivers
+        (issue 1981).
+
+        Performance and stability improvements on all platforms.
+
+
+2012-05-23: Version 3.11.5
+
+        Performance and stability improvements on all platforms.
+
+
+2012-05-22: Version 3.11.4
+
+        Some cleanup to common.gypi. This fixes some host/target combinations
+        that weren't working in the Make build on Mac.
+
+        Handle EINTR in socket functions and continue incomplete sends.
+        (issue 2098)
+
+        Fixed python deprecations. (issue 1391)
+
+        Made socket send and receive more robust and return 0 on failure.
+        (Chromium issue 15719)
+
+        Fixed GCC 4.7 (C++11) compilation. (issue 2136)
+
+        Set '-m32' option for host and target platforms
+
+        Performance and stability improvements on all platforms.
+
+
+2012-05-18: Version 3.11.3
+
+        Disable optimization for functions that have scopes that cannot be
+        reconstructed from the context chain. (issue 2071)
+
+        Define V8_EXPORT to nothing for clients of v8. (Chromium issue 90078)
+
+        Correctly check for native error objects. (Chromium issue 2138)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-05-16: Version 3.11.2
+
+        Revert r11496. (Chromium issue 128146)
+
+        Implement map collection for incremental marking. (issue 1465)
+
+        Add toString method to CallSite (which describes a frame of the
+        stack trace).
+
 2012-05-15: Version 3.11.1
 
         Added a readbuffer function to d8 that reads a file into an ArrayBuffer.

diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index fbca566..0d825c0 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -228,6 +228,7 @@ $(OUTDIR)/Makefile.android: $(GYPFILES) $(ENVFILE) build/android.gypi \
 	  must-set-ANDROID_NDK_ROOT
 	GYP_GENERATORS=make \
 	CC="${ANDROID_TOOL_PREFIX}-gcc" \
+	CXX="${ANDROID_TOOL_PREFIX}-g++" \
 	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
 	              -Ibuild/standalone.gypi --depth=.
-Ibuild/android.gypi \ -S.android $(GYPFLAGS) diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct index b0d1344..ebce7ff 100644 --- a/deps/v8/SConstruct +++ b/deps/v8/SConstruct @@ -101,14 +101,14 @@ LIBRARY_FLAGS = { 'os:linux': { 'CCFLAGS': ['-ansi'] + GCC_EXTRA_CCFLAGS, 'library:shared': { - 'CPPDEFINES': ['V8_SHARED'], + 'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED'], 'LIBS': ['pthread'] } }, 'os:macos': { 'CCFLAGS': ['-ansi', '-mmacosx-version-min=10.4'], 'library:shared': { - 'CPPDEFINES': ['V8_SHARED'] + 'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED'], } }, 'os:freebsd': { diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi index 327562d..7f084b8 100644 --- a/deps/v8/build/common.gypi +++ b/deps/v8/build/common.gypi @@ -110,151 +110,117 @@ ['v8_enable_gdbjit==1', { 'defines': ['ENABLE_GDB_JIT_INTERFACE',], }], - ['OS!="mac"', { - # TODO(mark): The OS!="mac" conditional is temporary. It can be - # removed once the Mac Chromium build stops setting target_arch to - # ia32 and instead sets it to mac. Other checks in this file for - # OS=="mac" can be removed at that time as well. This can be cleaned - # up once http://crbug.com/44205 is fixed. + ['v8_target_arch=="arm"', { + 'defines': [ + 'V8_TARGET_ARCH_ARM', + ], 'conditions': [ - ['v8_target_arch=="arm"', { + [ 'v8_can_use_unaligned_accesses=="true"', { 'defines': [ - 'V8_TARGET_ARCH_ARM', + 'CAN_USE_UNALIGNED_ACCESSES=1', ], - 'conditions': [ - [ 'v8_can_use_unaligned_accesses=="true"', { - 'defines': [ - 'CAN_USE_UNALIGNED_ACCESSES=1', - ], - }], - [ 'v8_can_use_unaligned_accesses=="false"', { - 'defines': [ - 'CAN_USE_UNALIGNED_ACCESSES=0', - ], - }], - [ 'v8_can_use_vfp_instructions=="true"', { - 'defines': [ - 'CAN_USE_VFP_INSTRUCTIONS', - ], - }], - [ 'v8_use_arm_eabi_hardfloat=="true"', { - 'defines': [ - 'USE_EABI_HARDFLOAT=1', - 'CAN_USE_VFP_INSTRUCTIONS', - ], - 'target_conditions': [ - ['_toolset=="target"', { - 'cflags': ['-mfloat-abi=hard',], - }], - ], - }, { - 'defines': [ - 'USE_EABI_HARDFLOAT=0', - ], - }], - # The ARM assembler assumes the host is 32 bits, - # so force building 32-bit host tools. 
- ['host_arch=="x64" or OS=="android"', { - 'target_conditions': [ - ['_toolset=="host"', { - 'cflags': ['-m32'], - 'ldflags': ['-m32'], - }], - ], - }], + }], + [ 'v8_can_use_unaligned_accesses=="false"', { + 'defines': [ + 'CAN_USE_UNALIGNED_ACCESSES=0', ], }], - ['v8_target_arch=="ia32"', { + [ 'v8_can_use_vfp_instructions=="true"', { 'defines': [ - 'V8_TARGET_ARCH_IA32', + 'CAN_USE_VFP_INSTRUCTIONS', ], }], - ['v8_target_arch=="mips"', { + [ 'v8_use_arm_eabi_hardfloat=="true"', { 'defines': [ - 'V8_TARGET_ARCH_MIPS', + 'USE_EABI_HARDFLOAT=1', + 'CAN_USE_VFP_INSTRUCTIONS', ], - 'variables': { - 'mipscompiler': '&1 | grep -q "^Target: mips-" && echo "yes" || echo "no")', - }, - 'conditions': [ - ['mipscompiler=="yes"', { - 'target_conditions': [ - ['_toolset=="target"', { - 'cflags': ['-EL'], - 'ldflags': ['-EL'], - 'conditions': [ - [ 'v8_use_mips_abi_hardfloat=="true"', { - 'cflags': ['-mhard-float'], - 'ldflags': ['-mhard-float'], - }, { - 'cflags': ['-msoft-float'], - 'ldflags': ['-msoft-float'], - }], - ['mips_arch_variant=="mips32r2"', { - 'cflags': ['-mips32r2', '-Wa,-mips32r2'], - }], - ['mips_arch_variant=="loongson"', { - 'cflags': ['-mips3', '-Wa,-mips3'], - }, { - 'cflags': ['-mips32', '-Wa,-mips32'], - }], - ], - }], - ], + 'target_conditions': [ + ['_toolset=="target"', { + 'cflags': ['-mfloat-abi=hard',], }], - [ 'v8_can_use_fpu_instructions=="true"', { - 'defines': [ - 'CAN_USE_FPU_INSTRUCTIONS', - ], - }], - [ 'v8_use_mips_abi_hardfloat=="true"', { - 'defines': [ - '__mips_hard_float=1', - 'CAN_USE_FPU_INSTRUCTIONS', - ], - }, { - 'defines': [ - '__mips_soft_float=1' - ], - }], - ['mips_arch_variant=="mips32r2"', { - 'defines': ['_MIPS_ARCH_MIPS32R2',], - }], - ['mips_arch_variant=="loongson"', { - 'defines': ['_MIPS_ARCH_LOONGSON',], - }], - # The MIPS assembler assumes the host is 32 bits, - # so force building 32-bit host tools. - ['host_arch=="x64"', { - 'target_conditions': [ - ['_toolset=="host"', { - 'cflags': ['-m32'], - 'ldflags': ['-m32'], + ], + }, { + 'defines': [ + 'USE_EABI_HARDFLOAT=0', + ], + }], + ], + }], # v8_target_arch=="arm" + ['v8_target_arch=="ia32"', { + 'defines': [ + 'V8_TARGET_ARCH_IA32', + ], + }], # v8_target_arch=="ia32" + ['v8_target_arch=="mips"', { + 'defines': [ + 'V8_TARGET_ARCH_MIPS', + ], + 'variables': { + 'mipscompiler': '&1 | grep -q "^Target: mips-" && echo "yes" || echo "no")', + }, + 'conditions': [ + ['mipscompiler=="yes"', { + 'target_conditions': [ + ['_toolset=="target"', { + 'cflags': ['-EL'], + 'ldflags': ['-EL'], + 'conditions': [ + [ 'v8_use_mips_abi_hardfloat=="true"', { + 'cflags': ['-mhard-float'], + 'ldflags': ['-mhard-float'], + }, { + 'cflags': ['-msoft-float'], + 'ldflags': ['-msoft-float'], + }], + ['mips_arch_variant=="mips32r2"', { + 'cflags': ['-mips32r2', '-Wa,-mips32r2'], + }], + ['mips_arch_variant=="loongson"', { + 'cflags': ['-mips3', '-Wa,-mips3'], + }, { + 'cflags': ['-mips32', '-Wa,-mips32'], }], ], }], ], }], - ['v8_target_arch=="x64"', { + [ 'v8_can_use_fpu_instructions=="true"', { 'defines': [ - 'V8_TARGET_ARCH_X64', + 'CAN_USE_FPU_INSTRUCTIONS', ], }], - ], - }, { # Section for OS=="mac". 
- 'conditions': [ - ['target_arch=="ia32"', { - 'xcode_settings': { - 'ARCHS': ['i386'], - } + [ 'v8_use_mips_abi_hardfloat=="true"', { + 'defines': [ + '__mips_hard_float=1', + 'CAN_USE_FPU_INSTRUCTIONS', + ], + }, { + 'defines': [ + '__mips_soft_float=1' + ], }], - ['target_arch=="x64"', { - 'xcode_settings': { - 'ARCHS': ['x86_64'], - } + ['mips_arch_variant=="mips32r2"', { + 'defines': ['_MIPS_ARCH_MIPS32R2',], + }], + ['mips_arch_variant=="loongson"', { + 'defines': ['_MIPS_ARCH_LOONGSON',], }], ], - }], + }], # v8_target_arch=="mips" + ['v8_target_arch=="x64"', { + 'defines': [ + 'V8_TARGET_ARCH_X64', + ], + 'xcode_settings': { + 'ARCHS': [ 'x86_64' ], + }, + 'msvs_settings': { + 'VCLinkerTool': { + 'StackReserveSize': '2097152', + }, + }, + }], # v8_target_arch=="x64" ['v8_use_liveobjectlist=="true"', { 'defines': [ 'ENABLE_DEBUGGER_SUPPORT', @@ -272,6 +238,11 @@ 'defines': [ 'WIN32', ], + 'msvs_configuration_attributes': { + 'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)', + 'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)', + 'CharacterSet': '1', + }, }], ['OS=="win" and v8_enable_prof==1', { 'msvs_settings': { @@ -280,24 +251,9 @@ }, }, }], - ['OS=="win" and v8_target_arch=="x64"', { - 'msvs_settings': { - 'VCLinkerTool': { - 'StackReserveSize': '2097152', - }, - }, - }], ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ or OS=="netbsd"', { 'conditions': [ - [ 'v8_target_arch!="x64"', { - # Pass -m32 to the compiler iff it understands the flag. - 'variables': { - 'm32flag': ' /dev/null 2>&1) && echo "-m32" || true)', - }, - 'cflags': [ '<(m32flag)' ], - 'ldflags': [ '<(m32flag)' ], - }], [ 'v8_no_strict_aliasing==1', { 'cflags': [ '-fno-strict-aliasing' ], }], @@ -306,6 +262,41 @@ ['OS=="solaris"', { 'defines': [ '__C99FEATURES__=1' ], # isinf() etc. }], + ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ + or OS=="netbsd" or OS=="mac" or OS=="android") and \ + (v8_target_arch=="arm" or v8_target_arch=="ia32" or \ + v8_target_arch=="mips")', { + # Check whether the host compiler and target compiler support the + # '-m32' option and set it if so. 
+ 'target_conditions': [ + ['_toolset=="host"', { + 'variables': { + 'm32flag': ' /dev/null 2>&1) && echo -n "-m32" || true)', + }, + 'cflags': [ '<(m32flag)' ], + 'ldflags': [ '<(m32flag)' ], + 'xcode_settings': { + 'ARCHS': [ 'i386' ], + }, + }], + ['_toolset=="target"', { + 'variables': { + 'm32flag': ' /dev/null 2>&1) && echo -n "-m32" || true)', + }, + 'cflags': [ '<(m32flag)' ], + 'ldflags': [ '<(m32flag)' ], + 'xcode_settings': { + 'ARCHS': [ 'i386' ], + }, + }], + ], + }], + ['OS=="freebsd" or OS=="openbsd"', { + 'cflags': [ '-I/usr/local/include' ], + }], + ['OS=="netbsd"', { + 'cflags': [ '-I/usr/pkg/include' ], + }], ], # conditions 'configurations': { 'Debug': { @@ -332,14 +323,8 @@ }, }, 'conditions': [ - ['OS=="freebsd" or OS=="openbsd"', { - 'cflags': [ '-I/usr/local/include' ], - }], - ['OS=="netbsd"', { - 'cflags': [ '-I/usr/pkg/include' ], - }], ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', { - 'cflags': [ '-Wno-unused-parameter', + 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter', '-Wnon-virtual-dtor', '-Woverloaded-virtual' ], }], ], @@ -367,12 +352,6 @@ }], ], }], - ['OS=="freebsd" or OS=="openbsd"', { - 'cflags': [ '-I/usr/local/include' ], - }], - ['OS=="netbsd"', { - 'cflags': [ '-I/usr/pkg/include' ], - }], ['OS=="mac"', { 'xcode_settings': { 'GCC_OPTIMIZATION_LEVEL': '3', # -O3 @@ -385,10 +364,6 @@ }, }], # OS=="mac" ['OS=="win"', { - 'msvs_configuration_attributes': { - 'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)', - 'CharacterSet': '1', - }, 'msvs_settings': { 'VCCLCompilerTool': { 'Optimization': '2', diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 9024531..d31ef54 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -62,11 +62,13 @@ #else // _WIN32 -// Setup for Linux shared library export. There is no need to distinguish -// between building or using the V8 shared library, but we should not -// export symbols when we are building a static library. +// Setup for Linux shared library export. 
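
The guard that follows exports symbols only while the shared library itself is being built (V8_SHARED together with the new BUILDING_V8_SHARED); a client that merely compiles against libv8 (V8_SHARED alone) now gets an empty V8EXPORT, per the 3.11.3 ChangeLog entry above. A minimal sketch of the same pattern, using invented MYLIB_* names rather than anything from V8:

  // mylib.h -- illustrative only, assuming GCC >= 4 on an ELF platform.
  // Building the DSO:  g++ -fPIC -fvisibility=hidden -shared -DMYLIB_SHARED \
  //                        -DBUILDING_MYLIB mylib.cc -o libmylib.so
  // Using the DSO:     g++ -DMYLIB_SHARED client.cc -L. -lmylib
  #if defined(__GNUC__) && (__GNUC__ >= 4) && defined(MYLIB_SHARED)
  # ifdef BUILDING_MYLIB
  #  define MYLIB_EXPORT __attribute__ ((visibility("default")))
  # else
  #  define MYLIB_EXPORT   // clients do not re-export the library's symbols
  # endif
  #else
  # define MYLIB_EXPORT    // static build or non-GCC toolchain: no annotation
  #endif

  MYLIB_EXPORT int Add(int a, int b);  // stays callable across the .so boundary
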
#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED) +#ifdef BUILDING_V8_SHARED #define V8EXPORT __attribute__ ((visibility("default"))) +#else +#define V8EXPORT +#endif #else // defined(__GNUC__) && (__GNUC__ >= 4) #define V8EXPORT #endif // defined(__GNUC__) && (__GNUC__ >= 4) diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript index 0d0b535..2482b37 100755 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -68,6 +68,7 @@ SOURCES = { diy-fp.cc dtoa.cc elements.cc + elements-kind.cc execution.cc factory.cc flags.cc diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 52a84ed..74886f0 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -5040,7 +5040,7 @@ Local Array::CloneElementAt(uint32_t index) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); ON_BAILOUT(isolate, "v8::Array::CloneElementAt()", return Local()); i::Handle self = Utils::OpenHandle(this); - if (!self->HasFastElements()) { + if (!self->HasFastObjectElements()) { return Local(); } i::FixedArray* elms = i::FixedArray::cast(self->elements()); @@ -6045,13 +6045,6 @@ int HeapGraphNode::GetSelfSize() const { } -int HeapGraphNode::GetRetainedSize() const { - i::Isolate* isolate = i::Isolate::Current(); - IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainedSize"); - return ToInternal(this)->retained_size(); -} - - int HeapGraphNode::GetChildrenCount() const { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount"); @@ -6067,28 +6060,6 @@ const HeapGraphEdge* HeapGraphNode::GetChild(int index) const { } -int HeapGraphNode::GetRetainersCount() const { - i::Isolate* isolate = i::Isolate::Current(); - IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainersCount"); - return ToInternal(this)->retainers().length(); -} - - -const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const { - i::Isolate* isolate = i::Isolate::Current(); - IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainer"); - return reinterpret_cast( - ToInternal(this)->retainers()[index]); -} - - -const HeapGraphNode* HeapGraphNode::GetDominatorNode() const { - i::Isolate* isolate = i::Isolate::Current(); - IsDeadCheck(isolate, "v8::HeapSnapshot::GetDominatorNode"); - return reinterpret_cast(ToInternal(this)->dominator()); -} - - v8::Handle HeapGraphNode::GetHeapValue() const { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapGraphNode::GetHeapValue"); diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h index 3ad57f4..05e5e72 100644 --- a/deps/v8/src/api.h +++ b/deps/v8/src/api.h @@ -105,13 +105,13 @@ NeanderArray::NeanderArray(v8::internal::Handle obj) v8::internal::Object* NeanderObject::get(int offset) { - ASSERT(value()->HasFastElements()); + ASSERT(value()->HasFastObjectElements()); return v8::internal::FixedArray::cast(value()->elements())->get(offset); } void NeanderObject::set(int offset, v8::internal::Object* value) { - ASSERT(value_->HasFastElements()); + ASSERT(value_->HasFastObjectElements()); v8::internal::FixedArray::cast(value_->elements())->set(offset, value); } diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index c99e778..578bd81 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -114,7 +114,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm, Label* gc_required) { const int initial_capacity = JSArray::kPreallocatedArrayElements; STATIC_ASSERT(initial_capacity >= 0); - __ LoadInitialArrayMap(array_function, scratch2, scratch1); + __ 
LoadInitialArrayMap(array_function, scratch2, scratch1, false); // Allocate the JSArray object together with space for a fixed array with the // requested elements. @@ -208,7 +208,8 @@ static void AllocateJSArray(MacroAssembler* masm, bool fill_with_hole, Label* gc_required) { // Load the initial map from the array function. - __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage); + __ LoadInitialArrayMap(array_function, scratch2, + elements_array_storage, fill_with_hole); if (FLAG_debug_code) { // Assert that array size is not zero. __ tst(array_size, array_size); @@ -440,10 +441,10 @@ static void ArrayNativeCode(MacroAssembler* masm, __ b(call_generic_code); __ bind(¬_double); - // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS. + // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS. // r3: JSArray __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, r2, r9, diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index ad2ab7e..2296490 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -4824,27 +4824,32 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2); // Isolates: note we add an additional parameter here (isolate pointer). - const int kRegExpExecuteArguments = 8; + const int kRegExpExecuteArguments = 9; const int kParameterRegisters = 4; __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); // Stack pointer now points to cell where return address is to be written. // Arguments are before that on the stack or in registers. - // Argument 8 (sp[16]): Pass current isolate address. + // Argument 9 (sp[20]): Pass current isolate address. __ mov(r0, Operand(ExternalReference::isolate_address())); - __ str(r0, MemOperand(sp, 4 * kPointerSize)); + __ str(r0, MemOperand(sp, 5 * kPointerSize)); - // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript. + // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript. __ mov(r0, Operand(1)); - __ str(r0, MemOperand(sp, 3 * kPointerSize)); + __ str(r0, MemOperand(sp, 4 * kPointerSize)); - // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area. + // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area. __ mov(r0, Operand(address_of_regexp_stack_memory_address)); __ ldr(r0, MemOperand(r0, 0)); __ mov(r2, Operand(address_of_regexp_stack_memory_size)); __ ldr(r2, MemOperand(r2, 0)); __ add(r0, r0, Operand(r2)); + __ str(r0, MemOperand(sp, 3 * kPointerSize)); + + // Argument 6: Set the number of capture registers to zero to force global + // regexps to behave as non-global. This does not affect non-global regexps. + __ mov(r0, Operand(0)); __ str(r0, MemOperand(sp, 2 * kPointerSize)); // Argument 5 (sp[4]): static offsets vector buffer. @@ -4893,7 +4898,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Check the result. Label success; - __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS)); + __ cmp(r0, Operand(1)); + // We expect exactly one result since we force the called regexp to behave + // as non-global. __ b(eq, &success); Label failure; __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE)); @@ -7095,8 +7102,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { // KeyedStoreStubCompiler::GenerateStoreFastElement. 
{ REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET }, { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET }, - // ElementsTransitionGenerator::GenerateSmiOnlyToObject - // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble + // ElementsTransitionGenerator::GenerateMapChangeElementTransition + // and ElementsTransitionGenerator::GenerateSmiToDouble // and ElementsTransitionGenerator::GenerateDoubleToObject { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET }, { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET }, @@ -7359,9 +7366,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { Label fast_elements; __ CheckFastElements(r2, r5, &double_elements); - // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS + // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS __ JumpIfSmi(r0, &smi_element); - __ CheckFastSmiOnlyElements(r2, r5, &fast_elements); + __ CheckFastSmiElements(r2, r5, &fast_elements); // Store into the array literal requires a elements transition. Call into // the runtime. @@ -7373,7 +7380,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { __ Push(r5, r4); __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); - // Array literal has ElementsKind of FAST_ELEMENTS and value is an object. + // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. __ bind(&fast_elements); __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); @@ -7384,8 +7391,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); __ Ret(); - // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or - // FAST_ELEMENTS, and value is Smi. + // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, + // and value is Smi. 
__ bind(&smi_element); __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index befd8f2..e00afb9 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -73,7 +73,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { // ------------------------------------------------------------------------- // Code generators -void ElementsTransitionGenerator::GenerateSmiOnlyToObject( +void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r0 : value @@ -96,7 +96,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToObject( } -void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( +void ElementsTransitionGenerator::GenerateSmiToDouble( MacroAssembler* masm, Label* fail) { // ----------- S t a t e ------------- // -- r0 : value diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 9f44872..2a5887a 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -1701,7 +1701,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { ASSERT_EQ(2, constant_elements->length()); ElementsKind constant_elements_kind = static_cast(Smi::cast(constant_elements->get(0))->value()); - bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS; + bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind); Handle constant_elements_values( FixedArrayBase::cast(constant_elements->get(1))); @@ -1722,8 +1722,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3); } else { - ASSERT(constant_elements_kind == FAST_ELEMENTS || - constant_elements_kind == FAST_SMI_ONLY_ELEMENTS || + ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) || FLAG_smi_only_arrays); FastCloneShallowArrayStub::Mode mode = has_fast_elements ? FastCloneShallowArrayStub::CLONE_ELEMENTS @@ -1751,7 +1750,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { } VisitForAccumulatorValue(subexpr); - if (constant_elements_kind == FAST_ELEMENTS) { + if (IsFastObjectElementsKind(constant_elements_kind)) { int offset = FixedArray::kHeaderSize + (i * kPointerSize); __ ldr(r6, MemOperand(sp)); // Copy of array literal. __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset)); @@ -3466,104 +3465,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) { } -void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) { - ZoneList* args = expr->arguments(); - ASSERT(args->length() == 3); - VisitForStackValue(args->at(0)); - VisitForStackValue(args->at(1)); - VisitForStackValue(args->at(2)); - Label done; - Label slow_case; - Register object = r0; - Register index1 = r1; - Register index2 = r2; - Register elements = r3; - Register scratch1 = r4; - Register scratch2 = r5; - - __ ldr(object, MemOperand(sp, 2 * kPointerSize)); - // Fetch the map and check if array is in fast case. - // Check that object doesn't require security checks and - // has no indexed interceptor. - __ CompareObjectType(object, scratch1, scratch2, JS_ARRAY_TYPE); - __ b(ne, &slow_case); - // Map is now in scratch1. 
- - __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset)); - __ tst(scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask)); - __ b(ne, &slow_case); - - // Check the object's elements are in fast case and writable. - __ ldr(elements, FieldMemOperand(object, JSObject::kElementsOffset)); - __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); - __ cmp(scratch1, ip); - __ b(ne, &slow_case); - - // Check that both indices are smis. - __ ldr(index1, MemOperand(sp, 1 * kPointerSize)); - __ ldr(index2, MemOperand(sp, 0)); - __ JumpIfNotBothSmi(index1, index2, &slow_case); - - // Check that both indices are valid. - __ ldr(scratch1, FieldMemOperand(object, JSArray::kLengthOffset)); - __ cmp(scratch1, index1); - __ cmp(scratch1, index2, hi); - __ b(ls, &slow_case); - - // Bring the address of the elements into index1 and index2. - __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ add(index1, - scratch1, - Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ add(index2, - scratch1, - Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize)); - - // Swap elements. - __ ldr(scratch1, MemOperand(index1, 0)); - __ ldr(scratch2, MemOperand(index2, 0)); - __ str(scratch1, MemOperand(index2, 0)); - __ str(scratch2, MemOperand(index1, 0)); - - Label no_remembered_set; - __ CheckPageFlag(elements, - scratch1, - 1 << MemoryChunk::SCAN_ON_SCAVENGE, - ne, - &no_remembered_set); - // Possible optimization: do a check that both values are Smis - // (or them and test against Smi mask.) - - // We are swapping two objects in an array and the incremental marker never - // pauses in the middle of scanning a single object. Therefore the - // incremental marker is not disturbed, so we don't need to call the - // RecordWrite stub that notifies the incremental marker. - __ RememberedSetHelper(elements, - index1, - scratch2, - kDontSaveFPRegs, - MacroAssembler::kFallThroughAtEnd); - __ RememberedSetHelper(elements, - index2, - scratch2, - kDontSaveFPRegs, - MacroAssembler::kFallThroughAtEnd); - - __ bind(&no_remembered_set); - // We are done. Drop elements from the stack, and return undefined. - __ Drop(3); - __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); - __ jmp(&done); - - __ bind(&slow_case); - __ CallRuntime(Runtime::kSwapElements, 3); - - __ bind(&done); - context()->Plug(r0); -} - - void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); ASSERT_EQ(2, args->length()); diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index c12c167..fd93480 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -1249,7 +1249,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) { // Must return the modified receiver in r0. if (!FLAG_trace_elements_transitions) { Label fail; - ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail); __ mov(r0, r2); __ Ret(); __ bind(&fail); @@ -1462,27 +1462,27 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex); __ b(ne, &non_double_value); - // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS -> + // Value is a double. Transition FAST_SMI_ELEMENTS -> // FAST_DOUBLE_ELEMENTS and complete the store. 
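
Across these hunks FAST_SMI_ONLY_ELEMENTS becomes FAST_SMI_ELEMENTS and every fast kind gains a "holey" twin. The toy model below is a compilable summary of the resulting lattice, not a copy of the new elements-kind.h: the first four enum values are pinned down by the STATIC_ASSERTs in the macro-assembler hunks further on, while the positions of the double kinds and the helper functions are simplified assumptions.

  #include <cassert>

  // Packed kinds are even, their holey twins odd; transitions may only move
  // toward more general kinds (smi -> double -> object, packed -> holey).
  enum ElementsKind {
    FAST_SMI_ELEMENTS          = 0,  // previously FAST_SMI_ONLY_ELEMENTS
    FAST_HOLEY_SMI_ELEMENTS    = 1,
    FAST_ELEMENTS              = 2,
    FAST_HOLEY_ELEMENTS        = 3,  // TERMINAL_FAST_ELEMENTS_KIND
    FAST_DOUBLE_ELEMENTS       = 4,  // assumed position
    FAST_HOLEY_DOUBLE_ELEMENTS = 5   // assumed position
  };

  static bool IsHoley(ElementsKind k) { return (k & 1) != 0; }

  // One step toward a more general kind; a smi array may also jump straight
  // to FAST_ELEMENTS when the stored value is a heap object, not a double.
  static ElementsKind MoreGeneral(ElementsKind k) {
    switch (k & ~1) {
      case FAST_SMI_ELEMENTS:
        return IsHoley(k) ? FAST_HOLEY_DOUBLE_ELEMENTS : FAST_DOUBLE_ELEMENTS;
      case FAST_DOUBLE_ELEMENTS:
        return IsHoley(k) ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
      default:
        return k;  // FAST_(HOLEY_)ELEMENTS is already terminal
    }
  }

  int main() {
    assert(MoreGeneral(FAST_SMI_ELEMENTS) == FAST_DOUBLE_ELEMENTS);
    assert(MoreGeneral(FAST_HOLEY_SMI_ELEMENTS) == FAST_HOLEY_DOUBLE_ELEMENTS);
    assert(MoreGeneral(FAST_HOLEY_ELEMENTS) == FAST_HOLEY_ELEMENTS);
    return 0;
  }

This is also why the transition generators are renamed in the codegen hunks: smi-to-object is a pure map change (GenerateMapChangeElementsTransition), while smi-to-double (GenerateSmiToDouble) must rewrite the backing store.
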
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r4, &slow); ASSERT(receiver_map.is(r3)); // Transition code expects map in r3 - ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&fast_double_without_map_check); __ bind(&non_double_value); - // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, receiver_map, r4, &slow); ASSERT(receiver_map.is(r3)); // Transition code expects map in r3 - ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm); + ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&finish_object_store); diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index 5c60f53..c97831a 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -2082,8 +2082,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LInstruction* LChunkBuilder::DoTransitionElementsKind( HTransitionElementsKind* instr) { - if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS && - instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) { + ElementsKind from_kind = instr->original_map()->elements_kind(); + ElementsKind to_kind = instr->transitioned_map()->elements_kind(); + if (IsSimpleMapChangeTransition(from_kind, to_kind)) { LOperand* object = UseRegister(instr->object()); LOperand* new_map_reg = TempRegister(); LTransitionElementsKind* result = diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index ec8aac8..dbae813 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -1236,6 +1236,7 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1252,13 +1253,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { public: - LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, - LOperand* key) { + LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) { inputs_[0] = external_pointer; inputs_[1] = key; } @@ -1272,6 +1273,7 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1740,6 +1742,7 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { LOperand* object() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1762,6 +1765,7 @@ class LStoreKeyedFastDoubleElement: public 
LTemplateInstruction<0, 3, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } }; @@ -1806,6 +1810,7 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> { ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index bf11ab9..fd4b3e8 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -2696,8 +2696,10 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) { __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset)); __ ubfx(scratch, scratch, Map::kElementsKindShift, Map::kElementsKindBitCount); - __ cmp(scratch, Operand(FAST_ELEMENTS)); - __ b(eq, &done); + __ cmp(scratch, Operand(GetInitialFastElementsKind())); + __ b(lt, &fail); + __ cmp(scratch, Operand(TERMINAL_FAST_ELEMENTS_KIND)); + __ b(le, &done); __ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND)); __ b(lt, &fail); __ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND)); @@ -2744,7 +2746,9 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { // Load the result. __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); - __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize)); + uint32_t offset = FixedArray::kHeaderSize + + (instr->additional_index() << kPointerSizeLog2); + __ ldr(result, FieldMemOperand(scratch, offset)); // Check for the hole value. if (instr->hydrogen()->RequiresHoleCheck()) { @@ -2776,18 +2780,21 @@ void LCodeGen::DoLoadKeyedFastDoubleElement( } Operand operand = key_is_constant - ? Operand(constant_key * (1 << shift_size) + + ? Operand(((constant_key + instr->additional_index()) << shift_size) + FixedDoubleArray::kHeaderSize - kHeapObjectTag) : Operand(key, LSL, shift_size); __ add(elements, elements, operand); if (!key_is_constant) { __ add(elements, elements, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) + + (instr->additional_index() << shift_size))); } - __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); - __ cmp(scratch, Operand(kHoleNanUpper32)); - DeoptimizeIf(eq, instr->environment()); + if (instr->hydrogen()->RequiresHoleCheck()) { + __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); + __ cmp(scratch, Operand(kHoleNanUpper32)); + DeoptimizeIf(eq, instr->environment()); + } __ vldr(result, elements, 0); } @@ -2809,26 +2816,33 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( key = ToRegister(instr->key()); } int shift_size = ElementsKindToShiftSize(elements_kind); + int additional_offset = instr->additional_index() << shift_size; if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { CpuFeatures::Scope scope(VFP3); DwVfpRegister result = ToDoubleRegister(instr->result()); Operand operand = key_is_constant - ? Operand(constant_key * (1 << shift_size)) + ? 
Operand(constant_key << shift_size) : Operand(key, LSL, shift_size); __ add(scratch0(), external_pointer, operand); if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - __ vldr(result.low(), scratch0(), 0); + __ vldr(result.low(), scratch0(), additional_offset); __ vcvt_f64_f32(result, result.low()); } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ vldr(result, scratch0(), 0); + __ vldr(result, scratch0(), additional_offset); } } else { Register result = ToRegister(instr->result()); + if (instr->additional_index() != 0 && !key_is_constant) { + __ add(scratch0(), key, Operand(instr->additional_index())); + } MemOperand mem_operand(key_is_constant - ? MemOperand(external_pointer, constant_key * (1 << shift_size)) - : MemOperand(external_pointer, key, LSL, shift_size)); + ? MemOperand(external_pointer, + (constant_key << shift_size) + additional_offset) + : (instr->additional_index() == 0 + ? MemOperand(external_pointer, key, LSL, shift_size) + : MemOperand(external_pointer, scratch0(), LSL, shift_size))); switch (elements_kind) { case EXTERNAL_BYTE_ELEMENTS: __ ldrsb(result, mem_operand); @@ -2856,9 +2870,12 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( break; case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3726,10 +3743,16 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); int offset = - ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize; + (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize + + FixedArray::kHeaderSize; __ str(value, FieldMemOperand(elements, offset)); } else { __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); + if (instr->additional_index() != 0) { + __ add(scratch, + scratch, + Operand(instr->additional_index() << kPointerSizeLog2)); + } __ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize)); } @@ -3771,7 +3794,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement( } int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); Operand operand = key_is_constant - ? Operand(constant_key * (1 << shift_size) + + ? Operand((constant_key << shift_size) + FixedDoubleArray::kHeaderSize - kHeapObjectTag) : Operand(key, LSL, shift_size); __ add(scratch, elements, operand); @@ -3789,7 +3812,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement( vs); } - __ vstr(value, scratch, 0); + __ vstr(value, scratch, instr->additional_index() << shift_size); } @@ -3810,25 +3833,33 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( key = ToRegister(instr->key()); } int shift_size = ElementsKindToShiftSize(elements_kind); + int additional_offset = instr->additional_index() << shift_size; if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { CpuFeatures::Scope scope(VFP3); DwVfpRegister value(ToDoubleRegister(instr->value())); - Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size)) + Operand operand(key_is_constant ? 
Operand(constant_key << shift_size) : Operand(key, LSL, shift_size)); __ add(scratch0(), external_pointer, operand); if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { __ vcvt_f32_f64(double_scratch0().low(), value); - __ vstr(double_scratch0().low(), scratch0(), 0); + __ vstr(double_scratch0().low(), scratch0(), additional_offset); } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ vstr(value, scratch0(), 0); + __ vstr(value, scratch0(), additional_offset); } } else { Register value(ToRegister(instr->value())); + if (instr->additional_index() != 0 && !key_is_constant) { + __ add(scratch0(), key, Operand(instr->additional_index())); + } MemOperand mem_operand(key_is_constant - ? MemOperand(external_pointer, constant_key * (1 << shift_size)) - : MemOperand(external_pointer, key, LSL, shift_size)); + ? MemOperand(external_pointer, + ((constant_key + instr->additional_index()) + << shift_size)) + : (instr->additional_index() == 0 + ? MemOperand(external_pointer, key, LSL, shift_size) + : MemOperand(external_pointer, scratch0(), LSL, shift_size))); switch (elements_kind) { case EXTERNAL_PIXEL_ELEMENTS: case EXTERNAL_BYTE_ELEMENTS: @@ -3847,7 +3878,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( case EXTERNAL_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3884,20 +3918,22 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { __ cmp(scratch, Operand(from_map)); __ b(ne, ¬_applicable); __ mov(new_map_reg, Operand(to_map)); - if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) { + + if (IsSimpleMapChangeTransition(from_kind, to_kind)) { __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); // Write barrier. __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, scratch, kLRHasBeenSaved, kDontSaveFPRegs); - } else if (from_kind == FAST_SMI_ONLY_ELEMENTS && - to_kind == FAST_DOUBLE_ELEMENTS) { + } else if (IsFastSmiElementsKind(from_kind) && + IsFastDoubleElementsKind(to_kind)) { Register fixed_object_reg = ToRegister(instr->temp_reg()); ASSERT(fixed_object_reg.is(r2)); ASSERT(new_map_reg.is(r3)); __ mov(fixed_object_reg, object_reg); CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(), RelocInfo::CODE_TARGET, instr); - } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) { + } else if (IsFastDoubleElementsKind(from_kind) && + IsFastObjectElementsKind(to_kind)) { Register fixed_object_reg = ToRegister(instr->temp_reg()); ASSERT(fixed_object_reg.is(r2)); ASSERT(new_map_reg.is(r3)); @@ -4671,8 +4707,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { // Deopt if the array literal boilerplate ElementsKind is of a type different // than the expected one. The check isn't necessary if the boilerplate has - // already been converted to FAST_ELEMENTS. - if (boilerplate_elements_kind != FAST_ELEMENTS) { + // already been converted to TERMINAL_FAST_ELEMENTS_KIND. + if (CanTransitionToMoreGeneralFastElementsKind( + boilerplate_elements_kind, true)) { __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object()); // Load map into r2. 
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); @@ -4823,10 +4860,11 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) { ElementsKind boilerplate_elements_kind = instr->hydrogen()->boilerplate()->GetElementsKind(); - // Deopt if the literal boilerplate ElementsKind is of a type different than - // the expected one. The check isn't necessary if the boilerplate has already - // been converted to FAST_ELEMENTS. - if (boilerplate_elements_kind != FAST_ELEMENTS) { + // Deopt if the array literal boilerplate ElementsKind is of a type different + // than the expected one. The check isn't necessary if the boilerplate has + // already been converted to TERMINAL_FAST_ELEMENTS_KIND. + if (CanTransitionToMoreGeneralFastElementsKind( + boilerplate_elements_kind, true)) { __ LoadHeapObject(r1, instr->hydrogen()->boilerplate()); // Load map into r2. __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 4da2fec..b4aec54 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -1868,10 +1868,12 @@ void MacroAssembler::CompareRoot(Register obj, void MacroAssembler::CheckFastElements(Register map, Register scratch, Label* fail) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - STATIC_ASSERT(FAST_ELEMENTS == 1); + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + STATIC_ASSERT(FAST_ELEMENTS == 2); + STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); - cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue)); + cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); b(hi, fail); } @@ -1879,22 +1881,25 @@ void MacroAssembler::CheckFastElements(Register map, void MacroAssembler::CheckFastObjectElements(Register map, Register scratch, Label* fail) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - STATIC_ASSERT(FAST_ELEMENTS == 1); + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + STATIC_ASSERT(FAST_ELEMENTS == 2); + STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); - cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); + cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); b(ls, fail); - cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue)); + cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); b(hi, fail); } -void MacroAssembler::CheckFastSmiOnlyElements(Register map, - Register scratch, - Label* fail) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); +void MacroAssembler::CheckFastSmiElements(Register map, + Register scratch, + Label* fail) { + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); - cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); + cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); b(hi, fail); } @@ -1997,22 +2002,17 @@ void MacroAssembler::CompareMap(Register obj, ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); cmp(scratch, Operand(map)); if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) { - Map* transitioned_fast_element_map( - map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL)); - ASSERT(transitioned_fast_element_map == NULL || - map->elements_kind() != FAST_ELEMENTS); - if (transitioned_fast_element_map != NULL) { - b(eq, early_success); - 
cmp(scratch, Operand(Handle(transitioned_fast_element_map))); - } - - Map* transitioned_double_map( - map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL)); - ASSERT(transitioned_double_map == NULL || - map->elements_kind() == FAST_SMI_ONLY_ELEMENTS); - if (transitioned_double_map != NULL) { - b(eq, early_success); - cmp(scratch, Operand(Handle(transitioned_double_map))); + ElementsKind kind = map->elements_kind(); + if (IsFastElementsKind(kind)) { + bool packed = IsFastPackedElementsKind(kind); + Map* current_map = *map; + while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) { + kind = GetNextMoreGeneralFastElementsKind(kind, packed); + current_map = current_map->LookupElementsTransitionMap(kind, NULL); + if (!current_map) break; + b(eq, early_success); + cmp(scratch, Operand(Handle(current_map))); + } } } } @@ -2865,28 +2865,38 @@ void MacroAssembler::LoadTransitionedArrayMapConditional( ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset)); // Check that the function's map is the same as the expected cached map. - int expected_index = - Context::GetContextMapIndexFromElementsKind(expected_kind); - ldr(ip, MemOperand(scratch, Context::SlotOffset(expected_index))); - cmp(map_in_out, ip); + ldr(scratch, + MemOperand(scratch, + Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); + size_t offset = expected_kind * kPointerSize + + FixedArrayBase::kHeaderSize; + cmp(map_in_out, scratch); b(ne, no_map_match); // Use the transitioned cached map. - int trans_index = - Context::GetContextMapIndexFromElementsKind(transitioned_kind); - ldr(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index))); + offset = transitioned_kind * kPointerSize + + FixedArrayBase::kHeaderSize; + ldr(map_in_out, FieldMemOperand(scratch, offset)); } void MacroAssembler::LoadInitialArrayMap( - Register function_in, Register scratch, Register map_out) { + Register function_in, Register scratch, + Register map_out, bool can_have_holes) { ASSERT(!function_in.is(map_out)); Label done; ldr(map_out, FieldMemOperand(function_in, JSFunction::kPrototypeOrInitialMapOffset)); if (!FLAG_smi_only_arrays) { - LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, - FAST_ELEMENTS, + ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS; + LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, + kind, + map_out, + scratch, + &done); + } else if (can_have_holes) { + LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, + FAST_HOLEY_SMI_ELEMENTS, map_out, scratch, &done); diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 360f4c1..b93aba1 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -512,7 +512,8 @@ class MacroAssembler: public Assembler { // Load the initial map for new Arrays from a JSFunction. void LoadInitialArrayMap(Register function_in, Register scratch, - Register map_out); + Register map_out, + bool can_have_holes); void LoadGlobalFunction(int index, Register function); @@ -802,9 +803,9 @@ class MacroAssembler: public Assembler { // Check if a map for a JSObject indicates that the object has fast smi only // elements. Jump to the specified label if it does not. - void CheckFastSmiOnlyElements(Register map, - Register scratch, - Label* fail); + void CheckFastSmiElements(Register map, + Register scratch, + Label* fail); // Check to see if maybe_number can be stored as a double in // FastDoubleElements. 
If it can, store it at the index specified by key in diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index a833624..11790e5 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -43,45 +43,49 @@ namespace internal { #ifndef V8_INTERPRETED_REGEXP /* * This assembler uses the following register assignment convention + * - r4 : Temporarily stores the index of capture start after a matching pass + * for a global regexp. * - r5 : Pointer to current code object (Code*) including heap object tag. * - r6 : Current position in input, as negative offset from end of string. * Please notice that this is the byte offset, not the character offset! * - r7 : Currently loaded character. Must be loaded using * LoadCurrentCharacter before using any of the dispatch methods. - * - r8 : points to tip of backtrack stack + * - r8 : Points to tip of backtrack stack * - r9 : Unused, might be used by C code and expected unchanged. * - r10 : End of input (points to byte after last character in input). * - r11 : Frame pointer. Used to access arguments, local variables and * RegExp registers. * - r12 : IP register, used by assembler. Very volatile. - * - r13/sp : points to tip of C stack. + * - r13/sp : Points to tip of C stack. * * The remaining registers are free for computations. * Each call to a public method should retain this convention. * * The stack will have the following structure: - * - fp[52] Isolate* isolate (Address of the current isolate) - * - fp[48] direct_call (if 1, direct call from JavaScript code, - * if 0, call through the runtime system). - * - fp[44] stack_area_base (High end of the memory area to use as - * backtracking stack). + * - fp[56] Isolate* isolate (address of the current isolate) + * - fp[52] direct_call (if 1, direct call from JavaScript code, + * if 0, call through the runtime system). + * - fp[48] stack_area_base (high end of the memory area to use as + * backtracking stack). + * - fp[44] capture array size (may fit multiple sets of matches) * - fp[40] int* capture_array (int[num_saved_registers_], for output). * - fp[36] secondary link/return address used by native call. * --- sp when called --- - * - fp[32] return address (lr). - * - fp[28] old frame pointer (r11). + * - fp[32] return address (lr). + * - fp[28] old frame pointer (r11). * - fp[0..24] backup of registers r4..r10. * --- frame pointer ---- - * - fp[-4] end of input (Address of end of string). - * - fp[-8] start of input (Address of first character in string). + * - fp[-4] end of input (address of end of string). + * - fp[-8] start of input (address of first character in string). * - fp[-12] start index (character index of start). * - fp[-16] void* input_string (location of a handle containing the string). - * - fp[-20] Offset of location before start of input (effectively character + * - fp[-20] success counter (only for global regexps to count matches). + * - fp[-24] Offset of location before start of input (effectively character * position -1). Used to initialize capture registers to a * non-position. 
- * - fp[-24] At start (if 1, we are starting at the start of the + * - fp[-28] At start (if 1, we are starting at the start of the * string, otherwise 0) - * - fp[-28] register 0 (Only positions must be stored in the first + * - fp[-32] register 0 (Only positions must be stored in the first * - register 1 num_saved_registers_ registers) * - ... * - register num_registers-1 @@ -197,9 +201,9 @@ void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) { void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) { Label not_at_start; // Did we start the match at the start of the string at all? - __ ldr(r0, MemOperand(frame_pointer(), kAtStart)); + __ ldr(r0, MemOperand(frame_pointer(), kStartIndex)); __ cmp(r0, Operand(0, RelocInfo::NONE)); - BranchOrBacktrack(eq, ¬_at_start); + BranchOrBacktrack(ne, ¬_at_start); // If we did, are we still at the start of the input? __ ldr(r1, MemOperand(frame_pointer(), kInputStart)); @@ -212,9 +216,9 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) { void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) { // Did we start the match at the start of the string at all? - __ ldr(r0, MemOperand(frame_pointer(), kAtStart)); + __ ldr(r0, MemOperand(frame_pointer(), kStartIndex)); __ cmp(r0, Operand(0, RelocInfo::NONE)); - BranchOrBacktrack(eq, on_not_at_start); + BranchOrBacktrack(ne, on_not_at_start); // If we did, are we still at the start of the input? __ ldr(r1, MemOperand(frame_pointer(), kInputStart)); __ add(r0, end_of_input_address(), Operand(current_input_offset())); @@ -655,6 +659,7 @@ void RegExpMacroAssemblerARM::Fail() { Handle RegExpMacroAssemblerARM::GetCode(Handle source) { + Label return_r0; // Finalize code - write the entry point code now we know how many // registers we need. @@ -678,8 +683,9 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { // Set frame pointer in space for it if this is not a direct call // from generated code. __ add(frame_pointer(), sp, Operand(4 * kPointerSize)); + __ mov(r0, Operand(0, RelocInfo::NONE)); + __ push(r0); // Make room for success counter and initialize it to 0. __ push(r0); // Make room for "position - 1" constant (value is irrelevant). - __ push(r0); // Make room for "at start" constant (value is irrelevant). // Check if we have space on the stack for registers. Label stack_limit_hit; Label stack_ok; @@ -698,13 +704,13 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { // Exit with OutOfMemory exception. There is not enough space on the stack // for our working registers. __ mov(r0, Operand(EXCEPTION)); - __ jmp(&exit_label_); + __ jmp(&return_r0); __ bind(&stack_limit_hit); CallCheckStackGuardState(r0); __ cmp(r0, Operand(0, RelocInfo::NONE)); // If returned value is non-zero, we exit with the returned value as result. - __ b(ne, &exit_label_); + __ b(ne, &return_r0); __ bind(&stack_ok); @@ -725,41 +731,45 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { // position registers. __ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne)); - // Determine whether the start index is zero, that is at the start of the - // string, and store that value in a local variable. 
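A note on the rewritten layout comment above: this patch deletes the dedicated "at start" slot (the kAtStart constant disappears from regexp-macro-assembler-arm.h further down, and GetCode now pushes only the success counter and the "position - 1" word), so the "fp[-28] At start" line retained in the new comment appears to be stale. Reconstructed from the header constants, the local slots after this patch are, as a sketch (assuming the usual kFramePointer/kInputEnd/kInputStart/kStartIndex chain, which this hunk does not show):

    // Sketch only; mirrors the constant arithmetic in
    // regexp-macro-assembler-arm.h as changed by this patch (kPointerSize == 4).
    static const int kFramePointer       = 0;                                  // r11
    static const int kInputEnd           = kFramePointer - kPointerSize;       // fp[-4]
    static const int kInputStart         = kInputEnd - kPointerSize;           // fp[-8]
    static const int kStartIndex         = kInputStart - kPointerSize;         // fp[-12]
    static const int kInputString        = kStartIndex - kPointerSize;         // fp[-16]
    static const int kSuccessfulCaptures = kInputString - kPointerSize;        // fp[-20], new
    static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize; // fp[-24]
    static const int kRegisterZero       = kInputStartMinusOne - kPointerSize; // fp[-28]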
- __ cmp(r1, Operand(0)); - __ mov(r1, Operand(1), LeaveCC, eq); - __ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne); - __ str(r1, MemOperand(frame_pointer(), kAtStart)); + // Initialize code pointer register + __ mov(code_pointer(), Operand(masm_->CodeObject())); + + Label load_char_start_regexp, start_regexp; + // Load newline if index is at start, previous character otherwise. + __ cmp(r1, Operand(0, RelocInfo::NONE)); + __ b(ne, &load_char_start_regexp); + __ mov(current_character(), Operand('\n'), LeaveCC, eq); + __ jmp(&start_regexp); + + // Global regexp restarts matching here. + __ bind(&load_char_start_regexp); + // Load previous char as initial value of current character register. + LoadCurrentCharacterUnchecked(-1, 1); + __ bind(&start_regexp); + // Initialize on-stack registers. if (num_saved_registers_ > 0) { // Always is, if generated from a regexp. // Fill saved registers with initial value = start offset - 1 - - // Address of register 0. - __ add(r1, frame_pointer(), Operand(kRegisterZero)); - __ mov(r2, Operand(num_saved_registers_)); - Label init_loop; - __ bind(&init_loop); - __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex)); - __ sub(r2, r2, Operand(1), SetCC); - __ b(ne, &init_loop); + if (num_saved_registers_ > 8) { + // Address of register 0. + __ add(r1, frame_pointer(), Operand(kRegisterZero)); + __ mov(r2, Operand(num_saved_registers_)); + Label init_loop; + __ bind(&init_loop); + __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex)); + __ sub(r2, r2, Operand(1), SetCC); + __ b(ne, &init_loop); + } else { + for (int i = 0; i < num_saved_registers_; i++) { + __ str(r0, register_location(i)); + } + } } // Initialize backtrack stack pointer. __ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd)); - // Initialize code pointer register - __ mov(code_pointer(), Operand(masm_->CodeObject())); - // Load previous char as initial value of current character register. - Label at_start; - __ ldr(r0, MemOperand(frame_pointer(), kAtStart)); - __ cmp(r0, Operand(0, RelocInfo::NONE)); - __ b(ne, &at_start); - LoadCurrentCharacterUnchecked(-1, 1); // Load previous char. - __ jmp(&start_label_); - __ bind(&at_start); - __ mov(current_character(), Operand('\n')); - __ jmp(&start_label_); + __ jmp(&start_label_); // Exit code: if (success_label_.is_linked()) { @@ -786,6 +796,10 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { for (int i = 0; i < num_saved_registers_; i += 2) { __ ldr(r2, register_location(i)); __ ldr(r3, register_location(i + 1)); + if (global()) { + // Keep capture start in r4 for the zero-length check later. + __ mov(r4, r2); + } if (mode_ == UC16) { __ add(r2, r1, Operand(r2, ASR, 1)); __ add(r3, r1, Operand(r3, ASR, 1)); @@ -797,10 +811,54 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { __ str(r3, MemOperand(r0, kPointerSize, PostIndex)); } } - __ mov(r0, Operand(SUCCESS)); + + if (global()) { + // Restart matching if the regular expression is flagged as global. + __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures)); + __ ldr(r1, MemOperand(frame_pointer(), kNumOutputRegisters)); + __ ldr(r2, MemOperand(frame_pointer(), kRegisterOutput)); + // Increment success counter. + __ add(r0, r0, Operand(1)); + __ str(r0, MemOperand(frame_pointer(), kSuccessfulCaptures)); + // Capture results have been stored, so the number of remaining global + // output registers is reduced by the number of stored captures. 
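The success path emitted above and continued just below is the core of the new one-pass global matching. Paraphrased as a C++-style sketch (MatchOnce and StoreCaptures are hypothetical stand-ins for the generated match body; the control flow mirrors the emitted code):

    // Sketch of the generated control flow for a global regexp, not V8 code.
    for (;;) {
      if (!MatchOnce()) break;                  // failure exits with the count
      StoreCaptures(output);                    // r4 preserves the match start
      successful_captures++;                    // fp[kSuccessfulCaptures]
      output_left -= num_saved_registers_;
      if (output_left < num_saved_registers_)   // no room for another set
        break;
      output += num_saved_registers_;           // advance fp[kRegisterOutput]
      if (current_position == match_start) {    // zero-length match
        if (current_position == end_of_input) break;
        current_position += (mode_ == UC16) ? 2 : 1;  // skip one character
      }
      // loop back to load_char_start_regexp
    }
    return successful_captures;                 // r0 at the return_r0 label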
+ __ sub(r1, r1, Operand(num_saved_registers_)); + // Check whether we have enough room for another set of capture results. + __ cmp(r1, Operand(num_saved_registers_)); + __ b(lt, &return_r0); + + __ str(r1, MemOperand(frame_pointer(), kNumOutputRegisters)); + // Advance the location for output. + __ add(r2, r2, Operand(num_saved_registers_ * kPointerSize)); + __ str(r2, MemOperand(frame_pointer(), kRegisterOutput)); + + // Prepare r0 to initialize registers with its value in the next run. + __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne)); + // Special case for zero-length matches. + // r4: capture start index + __ cmp(current_input_offset(), r4); + // Not a zero-length match, restart. + __ b(ne, &load_char_start_regexp); + // Offset from the end is zero if we already reached the end. + __ cmp(current_input_offset(), Operand(0)); + __ b(eq, &exit_label_); + // Advance current position after a zero-length match. + __ add(current_input_offset(), + current_input_offset(), + Operand((mode_ == UC16) ? 2 : 1)); + __ b(&load_char_start_regexp); + } else { + __ mov(r0, Operand(SUCCESS)); + } } + // Exit and return r0 __ bind(&exit_label_); + if (global()) { + __ ldr(r0, MemOperand(frame_pointer(), kSuccessfulCaptures)); + } + + __ bind(&return_r0); // Skip sp past regexp registers and local variables.. __ mov(sp, frame_pointer()); // Restore registers r4..r11 and return (restoring lr to pc). @@ -822,7 +880,7 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { __ cmp(r0, Operand(0, RelocInfo::NONE)); // If returning non-zero, we should end execution with the given // result as return value. - __ b(ne, &exit_label_); + __ b(ne, &return_r0); // String might have moved: Reload end of string from frame. __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); @@ -859,7 +917,7 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { __ bind(&exit_with_exception); // Exit with Result EXCEPTION(-1) to signal thrown exception. __ mov(r0, Operand(EXCEPTION)); - __ jmp(&exit_label_); + __ jmp(&return_r0); } CodeDesc code_desc; @@ -1014,8 +1072,9 @@ void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) { } -void RegExpMacroAssemblerARM::Succeed() { +bool RegExpMacroAssemblerARM::Succeed() { __ jmp(&success_label_); + return global(); } @@ -1307,8 +1366,9 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset, int characters) { Register offset = current_input_offset(); if (cp_offset != 0) { - __ add(r0, current_input_offset(), Operand(cp_offset * char_size())); - offset = r0; + // r4 is not being used to store the capture start index at this point. + __ add(r4, current_input_offset(), Operand(cp_offset * char_size())); + offset = r4; } // The ldr, str, ldrh, strh instructions can do unaligned accesses, if the CPU // and the operating system running on the target allow it. diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h index 14f984f..f2d5c55 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.h +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -113,7 +113,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { virtual void ReadStackPointerFromRegister(int reg); virtual void SetCurrentPositionFromEnd(int by); virtual void SetRegister(int register_index, int to); - virtual void Succeed(); + virtual bool Succeed(); virtual void WriteCurrentPositionToRegister(int reg, int cp_offset); virtual void ClearRegisters(int reg_from, int reg_to); virtual void WriteStackPointerToRegister(int reg); @@ -137,7 +137,8 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize; // Stack parameters placed by caller. static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize; - static const int kStackHighEnd = kRegisterOutput + kPointerSize; + static const int kNumOutputRegisters = kRegisterOutput + kPointerSize; + static const int kStackHighEnd = kNumOutputRegisters + kPointerSize; static const int kDirectCall = kStackHighEnd + kPointerSize; static const int kIsolate = kDirectCall + kPointerSize; @@ -149,10 +150,10 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { static const int kInputString = kStartIndex - kPointerSize; // When adding local variables remember to push space for them in // the frame in GetCode. - static const int kInputStartMinusOne = kInputString - kPointerSize; - static const int kAtStart = kInputStartMinusOne - kPointerSize; + static const int kSuccessfulCaptures = kInputString - kPointerSize; + static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize; // First register address. Following registers are below it on the stack. - static const int kRegisterZero = kAtStart - kPointerSize; + static const int kRegisterZero = kInputStartMinusOne - kPointerSize; // Initial size of code buffer. static const size_t kRegExpCodeSize = 1024; diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h index 585f1e0..d1cad15 100644 --- a/deps/v8/src/arm/simulator-arm.h +++ b/deps/v8/src/arm/simulator-arm.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -49,16 +49,16 @@ namespace internal { (entry(p0, p1, p2, p3, p4)) typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*, - void*, int*, Address, int, Isolate*); + void*, int*, int, Address, int, Isolate*); // Call the generated regexp code directly. The code at the entry address // should act as a function matching the type arm_regexp_matcher. // The fifth argument is a dummy that reserves the space used for // the return address added by the ExitFrame in native calls. 
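For readability, here is the same typedef with parameter names added; the names are inferred from the frame layout documented earlier in this patch, not taken from the V8 sources. The new int parameter carries the size of the capture output area (the "capture array size" stack slot, which may fit multiple sets of matches for a global regexp):

    // Annotated sketch of the matcher signature after this patch.
    typedef int (*arm_regexp_matcher)(String* input,
                                      int start_index,
                                      const byte* input_start,
                                      const byte* input_end,
                                      void* return_address_slot,  // 5th, dummy
                                      int* output,                // capture array
                                      int output_size,            // new parameter
                                      Address stack_base,
                                      int direct_call,
                                      Isolate* isolate);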
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \ +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \ (FUNCTION_CAST(entry)( \ - p0, p1, p2, p3, NULL, p4, p5, p6, p7)) + p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)) #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ reinterpret_cast(try_catch_address) @@ -401,9 +401,9 @@ class Simulator { reinterpret_cast(Simulator::current(Isolate::Current())->Call( \ FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4)) -#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \ +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \ Simulator::current(Isolate::Current())->Call( \ - entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7) + entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8) #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ try_catch_address == NULL ? \ diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 49c0982..a024d79 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -1581,16 +1581,29 @@ Handle CallStubCompiler::CompileArrayPushCall( __ jmp(&fast_object); // In case of fast smi-only, convert to fast object, otherwise bail out. __ bind(¬_fast_object); - __ CheckFastSmiOnlyElements(r3, r7, &call_builtin); + __ CheckFastSmiElements(r3, r7, &call_builtin); // edx: receiver // r3: map - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + Label try_holey_map; + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, r3, r7, + &try_holey_map); + __ mov(r2, receiver); + ElementsTransitionGenerator:: + GenerateMapChangeElementsTransition(masm()); + __ jmp(&fast_object); + + __ bind(&try_holey_map); + __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS, + FAST_HOLEY_ELEMENTS, + r3, + r7, &call_builtin); __ mov(r2, receiver); - ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm()); + ElementsTransitionGenerator:: + GenerateMapChangeElementsTransition(masm()); __ bind(&fast_object); } else { __ CheckFastObjectElements(r3, r3, &call_builtin); @@ -3372,8 +3385,11 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) { case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3497,8 +3513,11 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( } break; case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3838,8 +3857,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( } break; case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3902,8 +3924,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case 
FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -4042,8 +4067,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -4225,7 +4253,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // Check that the key is a smi or a heap number convertible to a smi. GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic); - if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + if (IsFastSmiElementsKind(elements_kind)) { __ JumpIfNotSmi(value_reg, &transition_elements_kind); } @@ -4253,7 +4281,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( DONT_DO_SMI_CHECK); __ bind(&finish_store); - if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + if (IsFastSmiElementsKind(elements_kind)) { __ add(scratch, elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); @@ -4263,7 +4291,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); __ str(value_reg, MemOperand(scratch)); } else { - ASSERT(elements_kind == FAST_ELEMENTS); + ASSERT(IsFastObjectElementsKind(elements_kind)); __ add(scratch, elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index 9f2c8de..a1cc5b6 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -827,7 +827,8 @@ function ArraySort(comparefn) { var element = a[i]; var order = %_CallFunction(receiver, element, pivot, comparefn); if (order < 0) { - %_SwapElements(a, i, low_end); + a[i] = a[low_end]; + a[low_end] = element; low_end++; } else if (order > 0) { do { @@ -836,9 +837,12 @@ function ArraySort(comparefn) { var top_elem = a[high_start]; order = %_CallFunction(receiver, top_elem, pivot, comparefn); } while (order > 0); - %_SwapElements(a, i, high_start); + a[i] = a[high_start]; + a[high_start] = element; if (order < 0) { - %_SwapElements(a, i, low_end); + element = a[i]; + a[i] = a[low_end]; + a[low_end] = element; low_end++; } } diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index c65c68c..a20f87b 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -484,8 +484,8 @@ Handle Genesis::CreateEmptyFunction(Isolate* isolate) { global_context()->set_initial_object_prototype(*prototype); SetPrototype(object_fun, prototype); - object_function_map-> - set_instance_descriptors(heap->empty_descriptor_array()); + object_function_map->set_instance_descriptors( + heap->empty_descriptor_array()); } // Allocate the empty function as the prototype for function ECMAScript @@ -516,12 +516,10 @@ Handle Genesis::CreateEmptyFunction(Isolate* isolate) { function_instance_map_writable_prototype_->set_prototype(*empty_function); // Allocate the function map first and then patch the prototype later - Handle empty_fm = factory->CopyMapDropDescriptors( - function_without_prototype_map); - empty_fm->set_instance_descriptors( - function_without_prototype_map->instance_descriptors()); - empty_fm->set_prototype(global_context()->object_function()->prototype()); - empty_function->set_map(*empty_fm); + Handle empty_function_map = CreateFunctionMap(DONT_ADD_PROTOTYPE); + 
empty_function_map->set_prototype( + global_context()->object_function()->prototype()); + empty_function->set_map(*empty_function_map); return empty_function; } @@ -1094,7 +1092,7 @@ bool Genesis::InitializeGlobal(Handle inner_global, // Check the state of the object. ASSERT(result->HasFastProperties()); - ASSERT(result->HasFastElements()); + ASSERT(result->HasFastObjectElements()); #endif } @@ -1187,7 +1185,7 @@ bool Genesis::InitializeGlobal(Handle inner_global, // Check the state of the object. ASSERT(result->HasFastProperties()); - ASSERT(result->HasFastElements()); + ASSERT(result->HasFastObjectElements()); #endif } @@ -1637,7 +1635,7 @@ bool Genesis::InstallNatives() { array_function->initial_map()->CopyDropTransitions(); Map* new_map; if (!maybe_map->To(&new_map)) return false; - new_map->set_elements_kind(FAST_ELEMENTS); + new_map->set_elements_kind(FAST_HOLEY_ELEMENTS); array_function->set_initial_map(new_map); // Make "length" magic on instances. diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index 6d1c6a9..64ec3d9 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -200,9 +200,12 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args, array->set_elements(heap->empty_fixed_array()); if (!FLAG_smi_only_arrays) { Context* global_context = isolate->context()->global_context(); - if (array->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS && - !global_context->object_js_array_map()->IsUndefined()) { - array->set_map(Map::cast(global_context->object_js_array_map())); + if (array->GetElementsKind() == GetInitialFastElementsKind() && + !global_context->js_array_maps()->IsUndefined()) { + FixedArray* map_array = + FixedArray::cast(global_context->js_array_maps()); + array->set_map(Map::cast(map_array-> + get(TERMINAL_FAST_ELEMENTS_KIND))); } } } else { @@ -222,6 +225,13 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args, { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len); if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj; } + ElementsKind elements_kind = array->GetElementsKind(); + if (!IsFastHoleyElementsKind(elements_kind)) { + elements_kind = GetHoleyElementsKind(elements_kind); + MaybeObject* maybe_array = + array->TransitionElementsKind(elements_kind); + if (maybe_array->IsFailure()) return maybe_array; + } // We do not use SetContent to skip the unnecessary elements type check. array->set_elements(FixedArray::cast(fixed_array)); array->set_length(Smi::cast(obj)); @@ -250,7 +260,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args, // Allocate an appropriately typed elements array. 
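The builtins.cc hunk above makes Array(len) honor the new packed/holey split: an array allocated with a length but no values consists entirely of holes, so it must carry a holey map before its elements are installed. The mapping is the GetHoleyElementsKind helper this patch introduces in elements-kind.h; a sketch of its behavior:

    // Each packed fast kind maps to its holey twin; other kinds pass through.
    ASSERT(GetHoleyElementsKind(FAST_SMI_ELEMENTS)    == FAST_HOLEY_SMI_ELEMENTS);
    ASSERT(GetHoleyElementsKind(FAST_DOUBLE_ELEMENTS) == FAST_HOLEY_DOUBLE_ELEMENTS);
    ASSERT(GetHoleyElementsKind(FAST_ELEMENTS)        == FAST_HOLEY_ELEMENTS);
    ASSERT(GetHoleyElementsKind(DICTIONARY_ELEMENTS)  == DICTIONARY_ELEMENTS);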
MaybeObject* maybe_elms; ElementsKind elements_kind = array->GetElementsKind(); - if (elements_kind == FAST_DOUBLE_ELEMENTS) { + if (IsFastDoubleElementsKind(elements_kind)) { maybe_elms = heap->AllocateUninitializedFixedDoubleArray( number_of_elements); } else { @@ -261,13 +271,15 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args, // Fill in the content switch (array->GetElementsKind()) { - case FAST_SMI_ONLY_ELEMENTS: { + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_SMI_ELEMENTS: { FixedArray* smi_elms = FixedArray::cast(elms); for (int index = 0; index < number_of_elements; index++) { smi_elms->set(index, (*args)[index+1], SKIP_WRITE_BARRIER); } break; } + case FAST_HOLEY_ELEMENTS: case FAST_ELEMENTS: { AssertNoAllocation no_gc; WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); @@ -277,6 +289,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args, } break; } + case FAST_HOLEY_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: { FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms); for (int index = 0; index < number_of_elements; index++) { @@ -412,19 +425,17 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements( HeapObject* elms = array->elements(); Map* map = elms->map(); if (map == heap->fixed_array_map()) { - if (array->HasFastElements()) return elms; - if (args == NULL) { - if (array->HasFastDoubleElements()) { - ASSERT(elms == heap->empty_fixed_array()); - MaybeObject* maybe_transition = - array->TransitionElementsKind(FAST_ELEMENTS); - if (maybe_transition->IsFailure()) return maybe_transition; - } + if (args == NULL || array->HasFastObjectElements()) return elms; + if (array->HasFastDoubleElements()) { + ASSERT(elms == heap->empty_fixed_array()); + MaybeObject* maybe_transition = + array->TransitionElementsKind(FAST_ELEMENTS); + if (maybe_transition->IsFailure()) return maybe_transition; return elms; } } else if (map == heap->fixed_cow_array_map()) { MaybeObject* maybe_writable_result = array->EnsureWritableFastElements(); - if (args == NULL || array->HasFastElements() || + if (args == NULL || array->HasFastObjectElements() || maybe_writable_result->IsFailure()) { return maybe_writable_result; } @@ -518,8 +529,8 @@ BUILTIN(ArrayPush) { } FixedArray* new_elms = FixedArray::cast(obj); - CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0, - new_elms, FAST_ELEMENTS, 0, len); + ElementsKind kind = array->GetElementsKind(); + CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, 0, len); FillWithHoles(heap, new_elms, new_length, capacity); elms = new_elms; @@ -590,7 +601,7 @@ BUILTIN(ArrayShift) { } FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastTypeElements()); + ASSERT(array->HasFastSmiOrObjectElements()); int len = Smi::cast(array->length())->value(); if (len == 0) return heap->undefined_value(); @@ -632,7 +643,7 @@ BUILTIN(ArrayUnshift) { } FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastTypeElements()); + ASSERT(array->HasFastSmiOrObjectElements()); int len = Smi::cast(array->length())->value(); int to_add = args.length() - 1; @@ -654,8 +665,8 @@ BUILTIN(ArrayUnshift) { if (!maybe_obj->ToObject(&obj)) return maybe_obj; } FixedArray* new_elms = FixedArray::cast(obj); - CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0, - new_elms, FAST_ELEMENTS, to_add, len); + ElementsKind kind = array->GetElementsKind(); + CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, to_add, len); FillWithHoles(heap, new_elms, 
new_length, capacity); elms = new_elms; array->set_elements(elms); @@ -684,7 +695,7 @@ BUILTIN(ArraySlice) { int len = -1; if (receiver->IsJSArray()) { JSArray* array = JSArray::cast(receiver); - if (!array->HasFastTypeElements() || + if (!array->HasFastSmiOrObjectElements() || !IsJSArrayFastElementMovingAllowed(heap, array)) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -700,7 +711,7 @@ BUILTIN(ArraySlice) { bool is_arguments_object_with_fast_elements = receiver->IsJSObject() && JSObject::cast(receiver)->map() == arguments_map - && JSObject::cast(receiver)->HasFastTypeElements(); + && JSObject::cast(receiver)->HasFastSmiOrObjectElements(); if (!is_arguments_object_with_fast_elements) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -765,9 +776,9 @@ BUILTIN(ArraySlice) { JSArray* result_array; if (!maybe_array->To(&result_array)) return maybe_array; - CopyObjectToObjectElements(elms, FAST_ELEMENTS, k, + CopyObjectToObjectElements(elms, elements_kind, k, FixedArray::cast(result_array->elements()), - FAST_ELEMENTS, 0, result_len); + elements_kind, 0, result_len); return result_array; } @@ -788,7 +799,7 @@ BUILTIN(ArraySplice) { } FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastTypeElements()); + ASSERT(array->HasFastSmiOrObjectElements()); int len = Smi::cast(array->length())->value(); @@ -839,9 +850,9 @@ BUILTIN(ArraySplice) { { // Fill newly created array. - CopyObjectToObjectElements(elms, FAST_ELEMENTS, actual_start, + CopyObjectToObjectElements(elms, elements_kind, actual_start, FixedArray::cast(result_array->elements()), - FAST_ELEMENTS, 0, actual_delete_count); + elements_kind, 0, actual_delete_count); } int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0; @@ -890,12 +901,13 @@ BUILTIN(ArraySplice) { { // Copy the part before actual_start as is. - CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0, - new_elms, FAST_ELEMENTS, 0, actual_start); + ElementsKind kind = array->GetElementsKind(); + CopyObjectToObjectElements(elms, kind, 0, + new_elms, kind, 0, actual_start); const int to_copy = len - actual_delete_count - actual_start; - CopyObjectToObjectElements(elms, FAST_ELEMENTS, + CopyObjectToObjectElements(elms, kind, actual_start + actual_delete_count, - new_elms, FAST_ELEMENTS, + new_elms, kind, actual_start + item_count, to_copy); } @@ -942,11 +954,12 @@ BUILTIN(ArrayConcat) { // and calculating total length. 
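The ArrayConcat loop below widens the result kind along two independent axes, smi-vs-object and packed-vs-holey. Its per-argument effect, pulled out into a hypothetical helper (Unify is not a function in this patch):

    // Sketch of the elements-kind widening performed per argument below.
    static ElementsKind Unify(ElementsKind result, JSArray* arg) {
      if (!arg->HasFastSmiElements() && IsFastSmiElementsKind(result)) {
        // Widen smi-only to tagged objects, preserving holeyness.
        result = IsFastHoleyElementsKind(result) ? FAST_HOLEY_ELEMENTS
                                                 : FAST_ELEMENTS;
      }
      if (arg->HasFastHoleyElements()) {
        result = GetHoleyElementsKind(result);   // packed -> holey
      }
      return result;
    }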
int n_arguments = args.length(); int result_len = 0; - ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS; + ElementsKind elements_kind = GetInitialFastElementsKind(); for (int i = 0; i < n_arguments; i++) { Object* arg = args[i]; - if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements() - || JSArray::cast(arg)->GetPrototype() != array_proto) { + if (!arg->IsJSArray() || + !JSArray::cast(arg)->HasFastSmiOrObjectElements() || + JSArray::cast(arg)->GetPrototype() != array_proto) { return CallJsBuiltin(isolate, "ArrayConcat", args); } @@ -963,8 +976,18 @@ BUILTIN(ArrayConcat) { return CallJsBuiltin(isolate, "ArrayConcat", args); } - if (!JSArray::cast(arg)->HasFastSmiOnlyElements()) { - elements_kind = FAST_ELEMENTS; + if (!JSArray::cast(arg)->HasFastSmiElements()) { + if (IsFastSmiElementsKind(elements_kind)) { + if (IsFastHoleyElementsKind(elements_kind)) { + elements_kind = FAST_HOLEY_ELEMENTS; + } else { + elements_kind = FAST_ELEMENTS; + } + } + } + + if (JSArray::cast(arg)->HasFastHoleyElements()) { + elements_kind = GetHoleyElementsKind(elements_kind); } } @@ -984,8 +1007,8 @@ BUILTIN(ArrayConcat) { JSArray* array = JSArray::cast(args[i]); int len = Smi::cast(array->length())->value(); FixedArray* elms = FixedArray::cast(array->elements()); - CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0, - result_elms, FAST_ELEMENTS, + CopyObjectToObjectElements(elms, elements_kind, 0, + result_elms, elements_kind, start_pos, len); start_pos += len; } diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index 814e358..8f31660 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -262,10 +262,13 @@ void JSEntryStub::FinishCode(Handle code) { void KeyedLoadElementStub::Generate(MacroAssembler* masm) { switch (elements_kind_) { case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: KeyedLoadStubCompiler::GenerateLoadFastElement(masm); break; case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm); break; case EXTERNAL_BYTE_ELEMENTS: @@ -292,7 +295,9 @@ void KeyedLoadElementStub::Generate(MacroAssembler* masm) { void KeyedStoreElementStub::Generate(MacroAssembler* masm) { switch (elements_kind_) { case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: { + case FAST_HOLEY_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: { KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_, elements_kind_, @@ -300,6 +305,7 @@ void KeyedStoreElementStub::Generate(MacroAssembler* masm) { } break; case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm, is_js_array_, grow_mode_); @@ -430,24 +436,32 @@ bool ToBooleanStub::Types::CanBeUndetectable() const { void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) { Label fail; + ASSERT(!IsFastHoleyElementsKind(from_) || IsFastHoleyElementsKind(to_)); if (!FLAG_trace_elements_transitions) { - if (to_ == FAST_ELEMENTS) { - if (from_ == FAST_SMI_ONLY_ELEMENTS) { - ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm); - } else if (from_ == FAST_DOUBLE_ELEMENTS) { + if (IsFastSmiOrObjectElementsKind(to_)) { + if (IsFastSmiOrObjectElementsKind(from_)) { + ElementsTransitionGenerator:: + GenerateMapChangeElementsTransition(masm); + } else if (IsFastDoubleElementsKind(from_)) { + ASSERT(!IsFastSmiElementsKind(to_)); ElementsTransitionGenerator::GenerateDoubleToObject(masm, 
&fail); } else { UNREACHABLE(); } KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_jsarray_, - FAST_ELEMENTS, + to_, grow_mode_); - } else if (from_ == FAST_SMI_ONLY_ELEMENTS && to_ == FAST_DOUBLE_ELEMENTS) { - ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail); + } else if (IsFastSmiElementsKind(from_) && + IsFastDoubleElementsKind(to_)) { + ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail); KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm, is_jsarray_, grow_mode_); + } else if (IsFastDoubleElementsKind(from_)) { + ASSERT(to_ == FAST_HOLEY_DOUBLE_ELEMENTS); + ElementsTransitionGenerator:: + GenerateMapChangeElementsTransition(masm); } else { UNREACHABLE(); } diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h index 50d70f2..08a777f 100644 --- a/deps/v8/src/codegen.h +++ b/deps/v8/src/codegen.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -95,8 +95,8 @@ UnaryMathFunction CreateSqrtFunction(); class ElementsTransitionGenerator : public AllStatic { public: - static void GenerateSmiOnlyToObject(MacroAssembler* masm); - static void GenerateSmiOnlyToDouble(MacroAssembler* masm, Label* fail); + static void GenerateMapChangeElementsTransition(MacroAssembler* masm); + static void GenerateSmiToDouble(MacroAssembler* masm, Label* fail); static void GenerateDoubleToObject(MacroAssembler* masm, Label* fail); private: diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index c9c2480..ecac5cb 100644 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -118,7 +118,7 @@ bool CompilationInfo::ShouldSelfOptimize() { FLAG_crankshaft && !function()->flags()->Contains(kDontSelfOptimize) && !function()->flags()->Contains(kDontOptimize) && - function()->scope()->allows_lazy_recompilation() && + function()->scope()->AllowsLazyRecompilation() && (shared_info().is_null() || !shared_info()->optimization_disabled()); } diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h index 647c15c..d154b82 100644 --- a/deps/v8/src/contexts.h +++ b/deps/v8/src/contexts.h @@ -106,9 +106,7 @@ enum BindingFlags { V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \ V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \ V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \ - V(SMI_JS_ARRAY_MAP_INDEX, Object, smi_js_array_map) \ - V(DOUBLE_JS_ARRAY_MAP_INDEX, Object, double_js_array_map) \ - V(OBJECT_JS_ARRAY_MAP_INDEX, Object, object_js_array_map) \ + V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \ V(DATE_FUNCTION_INDEX, JSFunction, date_function) \ V(JSON_OBJECT_INDEX, JSObject, json_object) \ V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \ @@ -248,9 +246,7 @@ class Context: public FixedArray { OBJECT_FUNCTION_INDEX, INTERNAL_ARRAY_FUNCTION_INDEX, ARRAY_FUNCTION_INDEX, - SMI_JS_ARRAY_MAP_INDEX, - DOUBLE_JS_ARRAY_MAP_INDEX, - OBJECT_JS_ARRAY_MAP_INDEX, + JS_ARRAY_MAPS_INDEX, DATE_FUNCTION_INDEX, JSON_OBJECT_INDEX, REGEXP_FUNCTION_INDEX, @@ -373,18 +369,6 @@ class Context: public FixedArray { Object* OptimizedFunctionsListHead(); void ClearOptimizedFunctions(); - static int GetContextMapIndexFromElementsKind( - ElementsKind elements_kind) { - if (elements_kind == FAST_DOUBLE_ELEMENTS) { - return Context::DOUBLE_JS_ARRAY_MAP_INDEX; - } else if (elements_kind == FAST_ELEMENTS) { 
- return Context::OBJECT_JS_ARRAY_MAP_INDEX; - } else { - ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS); - return Context::SMI_JS_ARRAY_MAP_INDEX; - } - } - #define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \ void set_##name(type* value) { \ ASSERT(IsGlobalContext()); \ diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc index adbe550..3f0b224 100644 --- a/deps/v8/src/d8.cc +++ b/deps/v8/src/d8.cc @@ -26,7 +26,8 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#ifdef USING_V8_SHARED // Defined when linking against shared lib on Windows. +// Defined when linking against shared lib on Windows. +#if defined(USING_V8_SHARED) && !defined(V8_SHARED) #define V8_SHARED #endif @@ -833,8 +834,6 @@ Handle Shell::CreateGlobalTemplate() { global_template->Set(String::New("print"), FunctionTemplate::New(Print)); global_template->Set(String::New("write"), FunctionTemplate::New(Write)); global_template->Set(String::New("read"), FunctionTemplate::New(Read)); - global_template->Set(String::New("readbinary"), - FunctionTemplate::New(ReadBinary)); global_template->Set(String::New("readbuffer"), FunctionTemplate::New(ReadBuffer)); global_template->Set(String::New("readline"), @@ -1055,23 +1054,6 @@ static char* ReadChars(const char* name, int* size_out) { } -Handle Shell::ReadBinary(const Arguments& args) { - String::Utf8Value filename(args[0]); - int size; - if (*filename == NULL) { - return ThrowException(String::New("Error loading file")); - } - char* chars = ReadChars(*filename, &size); - if (chars == NULL) { - return ThrowException(String::New("Error reading file")); - } - // We skip checking the string for UTF8 characters and use it raw as - // backing store for the external string with 8-bit characters. - BinaryResource* resource = new BinaryResource(chars, size); - return String::NewExternal(resource); -} - - Handle Shell::ReadBuffer(const Arguments& args) { String::Utf8Value filename(args[0]); int length; diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h index 23fdebc..b315086 100644 --- a/deps/v8/src/d8.h +++ b/deps/v8/src/d8.h @@ -307,7 +307,6 @@ class Shell : public i::AllStatic { static Handle EnableProfiler(const Arguments& args); static Handle DisableProfiler(const Arguments& args); static Handle Read(const Arguments& args); - static Handle ReadBinary(const Arguments& args); static Handle ReadBuffer(const Arguments& args); static Handle ReadFromStdin(); static Handle ReadLine(const Arguments& args) { diff --git a/deps/v8/src/debug-agent.cc b/deps/v8/src/debug-agent.cc index bdc7a57..10c0053 100644 --- a/deps/v8/src/debug-agent.cc +++ b/deps/v8/src/debug-agent.cc @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -247,7 +247,7 @@ SmartArrayPointer DebuggerAgentUtil::ReceiveMessage(const Socket* conn) { while (!(c == '\n' && prev_c == '\r')) { prev_c = c; received = conn->Receive(&c, 1); - if (received <= 0) { + if (received == 0) { PrintF("Error %d\n", Socket::LastError()); return SmartArrayPointer(); } @@ -323,41 +323,41 @@ bool DebuggerAgentUtil::SendConnectMessage(const Socket* conn, const char* embedding_host) { static const int kBufferSize = 80; char buffer[kBufferSize]; // Sending buffer. + bool ok; int len; - int r; // Send the header. 
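A note on the rewrite below: the return value of conn->Send is now treated as a boolean success flag rather than a byte count (the matching Socket interface change is in the platform.h hunk of this patch, not shown here), so every header write collapses to the same pattern:

    // Sketch of the repeated pattern in SendConnectMessage after this patch.
    len = OS::SNPrintF(Vector<char>(buffer, kBufferSize), "Type: connect\r\n");
    if (!conn->Send(buffer, len)) return false;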
len = OS::SNPrintF(Vector(buffer, kBufferSize), "Type: connect\r\n"); - r = conn->Send(buffer, len); - if (r != len) return false; + ok = conn->Send(buffer, len); + if (!ok) return false; len = OS::SNPrintF(Vector(buffer, kBufferSize), "V8-Version: %s\r\n", v8::V8::GetVersion()); - r = conn->Send(buffer, len); - if (r != len) return false; + ok = conn->Send(buffer, len); + if (!ok) return false; len = OS::SNPrintF(Vector(buffer, kBufferSize), "Protocol-Version: 1\r\n"); - r = conn->Send(buffer, len); - if (r != len) return false; + ok = conn->Send(buffer, len); + if (!ok) return false; if (embedding_host != NULL) { len = OS::SNPrintF(Vector(buffer, kBufferSize), "Embedding-Host: %s\r\n", embedding_host); - r = conn->Send(buffer, len); - if (r != len) return false; + ok = conn->Send(buffer, len); + if (!ok) return false; } len = OS::SNPrintF(Vector(buffer, kBufferSize), "%s: 0\r\n", kContentLength); - r = conn->Send(buffer, len); - if (r != len) return false; + ok = conn->Send(buffer, len); + if (!ok) return false; // Terminate header with empty line. len = OS::SNPrintF(Vector(buffer, kBufferSize), "\r\n"); - r = conn->Send(buffer, len); - if (r != len) return false; + ok = conn->Send(buffer, len); + if (!ok) return false; // No body for connect message. @@ -454,7 +454,7 @@ int DebuggerAgentUtil::ReceiveAll(const Socket* conn, char* data, int len) { int total_received = 0; while (total_received < len) { int received = conn->Receive(data + total_received, len - total_received); - if (received <= 0) { + if (received == 0) { return total_received; } total_received += received; diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc new file mode 100644 index 0000000..655a23b --- /dev/null +++ b/deps/v8/src/elements-kind.cc @@ -0,0 +1,134 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
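The new elements-kind.cc below (with its header, added next) centralizes the fast-kind transition lattice that the rest of this patch consumes. A sketch of walking it end to end:

    // Sketch: visiting the fast kinds from most specific to terminal.
    // With --packed-arrays the walk starts at FAST_SMI_ELEMENTS and visits
    // HOLEY_SMI -> DOUBLE -> HOLEY_DOUBLE -> ELEMENTS -> HOLEY_ELEMENTS.
    ElementsKind kind = GetInitialFastElementsKind();
    while (kind != TERMINAL_FAST_ELEMENTS_KIND) {   // == FAST_HOLEY_ELEMENTS
      kind = GetNextMoreGeneralFastElementsKind(kind, false);
    }
    // Passing true (allow_only_packed) makes the walk skip the holey kinds.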
+ +#include "elements-kind.h" + +#include "api.h" +#include "elements.h" +#include "objects.h" + +namespace v8 { +namespace internal { + + +void PrintElementsKind(FILE* out, ElementsKind kind) { + ElementsAccessor* accessor = ElementsAccessor::ForKind(kind); + PrintF(out, "%s", accessor->name()); +} + + +ElementsKind GetInitialFastElementsKind() { + if (FLAG_packed_arrays) { + return FAST_SMI_ELEMENTS; + } else { + return FAST_HOLEY_SMI_ELEMENTS; + } +} + + +struct InitializeFastElementsKindSequence { + static void Construct( + ElementsKind** fast_elements_kind_sequence_ptr) { + ElementsKind* fast_elements_kind_sequence = + new ElementsKind[kFastElementsKindCount]; + *fast_elements_kind_sequence_ptr = fast_elements_kind_sequence; + STATIC_ASSERT(FAST_SMI_ELEMENTS == FIRST_FAST_ELEMENTS_KIND); + fast_elements_kind_sequence[0] = FAST_SMI_ELEMENTS; + fast_elements_kind_sequence[1] = FAST_HOLEY_SMI_ELEMENTS; + fast_elements_kind_sequence[2] = FAST_DOUBLE_ELEMENTS; + fast_elements_kind_sequence[3] = FAST_HOLEY_DOUBLE_ELEMENTS; + fast_elements_kind_sequence[4] = FAST_ELEMENTS; + fast_elements_kind_sequence[5] = FAST_HOLEY_ELEMENTS; + } +}; + + +static LazyInstance::type + fast_elements_kind_sequence = LAZY_INSTANCE_INITIALIZER; + + +ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number) { + ASSERT(sequence_number >= 0 && + sequence_number < kFastElementsKindCount); + return fast_elements_kind_sequence.Get()[sequence_number]; +} + +int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) { + for (int i = 0; i < kFastElementsKindCount; ++i) { + if (fast_elements_kind_sequence.Get()[i] == elements_kind) { + return i; + } + } + UNREACHABLE(); + return 0; +} + + +ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind, + bool allow_only_packed) { + ASSERT(IsFastElementsKind(elements_kind)); + ASSERT(elements_kind != TERMINAL_FAST_ELEMENTS_KIND); + while (true) { + int index = + GetSequenceIndexFromFastElementsKind(elements_kind) + 1; + elements_kind = GetFastElementsKindFromSequenceIndex(index); + if (!IsFastHoleyElementsKind(elements_kind) || !allow_only_packed) { + return elements_kind; + } + } + UNREACHABLE(); + return TERMINAL_FAST_ELEMENTS_KIND; +} + + +bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind, + ElementsKind to_kind) { + switch (from_kind) { + case FAST_SMI_ELEMENTS: + return to_kind != FAST_SMI_ELEMENTS; + case FAST_HOLEY_SMI_ELEMENTS: + return to_kind != FAST_SMI_ELEMENTS && + to_kind != FAST_HOLEY_SMI_ELEMENTS; + case FAST_DOUBLE_ELEMENTS: + return to_kind != FAST_SMI_ELEMENTS && + to_kind != FAST_HOLEY_SMI_ELEMENTS && + to_kind != FAST_DOUBLE_ELEMENTS; + case FAST_HOLEY_DOUBLE_ELEMENTS: + return to_kind == FAST_ELEMENTS || + to_kind == FAST_HOLEY_ELEMENTS; + case FAST_ELEMENTS: + return to_kind == FAST_HOLEY_ELEMENTS; + case FAST_HOLEY_ELEMENTS: + return false; + default: + return false; + } +} + + +} } // namespace v8::internal diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h new file mode 100644 index 0000000..ab31a33 --- /dev/null +++ b/deps/v8/src/elements-kind.h @@ -0,0 +1,210 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_ELEMENTS_KIND_H_ +#define V8_ELEMENTS_KIND_H_ + +#include "v8checks.h" + +namespace v8 { +namespace internal { + +enum ElementsKind { + // The "fast" kind for elements that only contain SMI values. Must be first + // to make it possible to efficiently check maps for this kind. + FAST_SMI_ELEMENTS, + FAST_HOLEY_SMI_ELEMENTS, + + // The "fast" kind for tagged values. Must be second to make it possible to + // efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind + // together at once. + FAST_ELEMENTS, + FAST_HOLEY_ELEMENTS, + + // The "fast" kind for unwrapped, non-tagged double values. + FAST_DOUBLE_ELEMENTS, + FAST_HOLEY_DOUBLE_ELEMENTS, + + // The "slow" kind. 
+ DICTIONARY_ELEMENTS, + NON_STRICT_ARGUMENTS_ELEMENTS, + // The "fast" kind for external arrays + EXTERNAL_BYTE_ELEMENTS, + EXTERNAL_UNSIGNED_BYTE_ELEMENTS, + EXTERNAL_SHORT_ELEMENTS, + EXTERNAL_UNSIGNED_SHORT_ELEMENTS, + EXTERNAL_INT_ELEMENTS, + EXTERNAL_UNSIGNED_INT_ELEMENTS, + EXTERNAL_FLOAT_ELEMENTS, + EXTERNAL_DOUBLE_ELEMENTS, + EXTERNAL_PIXEL_ELEMENTS, + + // Derived constants from ElementsKind + FIRST_ELEMENTS_KIND = FAST_SMI_ELEMENTS, + LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS, + FIRST_FAST_ELEMENTS_KIND = FAST_SMI_ELEMENTS, + LAST_FAST_ELEMENTS_KIND = FAST_HOLEY_DOUBLE_ELEMENTS, + FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS, + LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS, + TERMINAL_FAST_ELEMENTS_KIND = FAST_HOLEY_ELEMENTS +}; + +const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1; +const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND - + FIRST_FAST_ELEMENTS_KIND + 1; + +void PrintElementsKind(FILE* out, ElementsKind kind); + +ElementsKind GetInitialFastElementsKind(); + +ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_index); + +int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind); + + +inline bool IsFastElementsKind(ElementsKind kind) { + ASSERT(FIRST_FAST_ELEMENTS_KIND == 0); + return kind <= FAST_HOLEY_DOUBLE_ELEMENTS; +} + + +inline bool IsFastDoubleElementsKind(ElementsKind kind) { + return kind == FAST_DOUBLE_ELEMENTS || + kind == FAST_HOLEY_DOUBLE_ELEMENTS; +} + + +inline bool IsFastSmiOrObjectElementsKind(ElementsKind kind) { + return kind == FAST_SMI_ELEMENTS || + kind == FAST_HOLEY_SMI_ELEMENTS || + kind == FAST_ELEMENTS || + kind == FAST_HOLEY_ELEMENTS; +} + + +inline bool IsFastSmiElementsKind(ElementsKind kind) { + return kind == FAST_SMI_ELEMENTS || + kind == FAST_HOLEY_SMI_ELEMENTS; +} + + +inline bool IsFastObjectElementsKind(ElementsKind kind) { + return kind == FAST_ELEMENTS || + kind == FAST_HOLEY_ELEMENTS; +} + + +inline bool IsFastHoleyElementsKind(ElementsKind kind) { + return kind == FAST_HOLEY_SMI_ELEMENTS || + kind == FAST_HOLEY_DOUBLE_ELEMENTS || + kind == FAST_HOLEY_ELEMENTS; +} + + +inline bool IsHoleyElementsKind(ElementsKind kind) { + return IsFastHoleyElementsKind(kind) || + kind == DICTIONARY_ELEMENTS; +} + + +inline bool IsFastPackedElementsKind(ElementsKind kind) { + return kind == FAST_SMI_ELEMENTS || + kind == FAST_DOUBLE_ELEMENTS || + kind == FAST_ELEMENTS; +} + + +inline ElementsKind GetPackedElementsKind(ElementsKind holey_kind) { + if (holey_kind == FAST_HOLEY_SMI_ELEMENTS) { + return FAST_SMI_ELEMENTS; + } + if (holey_kind == FAST_HOLEY_DOUBLE_ELEMENTS) { + return FAST_DOUBLE_ELEMENTS; + } + if (holey_kind == FAST_HOLEY_ELEMENTS) { + return FAST_ELEMENTS; + } + return holey_kind; +} + + +inline ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) { + if (packed_kind == FAST_SMI_ELEMENTS) { + return FAST_HOLEY_SMI_ELEMENTS; + } + if (packed_kind == FAST_DOUBLE_ELEMENTS) { + return FAST_HOLEY_DOUBLE_ELEMENTS; + } + if (packed_kind == FAST_ELEMENTS) { + return FAST_HOLEY_ELEMENTS; + } + return packed_kind; +} + + +inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) { + ASSERT(IsFastSmiElementsKind(from_kind)); + return (from_kind == FAST_SMI_ELEMENTS) + ? 
FAST_ELEMENTS + : FAST_HOLEY_ELEMENTS; +} + + +inline bool IsSimpleMapChangeTransition(ElementsKind from_kind, + ElementsKind to_kind) { + return (GetHoleyElementsKind(from_kind) == to_kind) || + (IsFastSmiElementsKind(from_kind) && + IsFastObjectElementsKind(to_kind)); +} + + +bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind, + ElementsKind to_kind); + + +inline bool IsTransitionableFastElementsKind(ElementsKind from_kind) { + return IsFastElementsKind(from_kind) && + from_kind != TERMINAL_FAST_ELEMENTS_KIND; +} + + +ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind, + bool allow_only_packed); + + +inline bool CanTransitionToMoreGeneralFastElementsKind( + ElementsKind elements_kind, + bool allow_only_packed) { + return IsFastElementsKind(elements_kind) && + (elements_kind != TERMINAL_FAST_ELEMENTS_KIND && + (!allow_only_packed || elements_kind != FAST_ELEMENTS)); +} + + +} } // namespace v8::internal + +#endif // V8_ELEMENTS_KIND_H_ diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc index d367af8..2692cb5 100644 --- a/deps/v8/src/elements.cc +++ b/deps/v8/src/elements.cc @@ -39,8 +39,14 @@ // Inheritance hierarchy: // - ElementsAccessorBase (abstract) // - FastElementsAccessor (abstract) -// - FastObjectElementsAccessor +// - FastSmiOrObjectElementsAccessor +// - FastPackedSmiElementsAccessor +// - FastHoleySmiElementsAccessor +// - FastPackedObjectElementsAccessor +// - FastHoleyObjectElementsAccessor // - FastDoubleElementsAccessor +// - FastPackedDoubleElementsAccessor +// - FastHoleyDoubleElementsAccessor // - ExternalElementsAccessor (abstract) // - ExternalByteElementsAccessor // - ExternalUnsignedByteElementsAccessor @@ -65,9 +71,15 @@ namespace internal { // identical. Note that the order must match that of the ElementsKind enum for // the |accessor_array[]| below to work. #define ELEMENTS_LIST(V) \ - V(FastObjectElementsAccessor, FAST_SMI_ONLY_ELEMENTS, FixedArray) \ - V(FastObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \ - V(FastDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, FixedDoubleArray) \ + V(FastPackedSmiElementsAccessor, FAST_SMI_ELEMENTS, FixedArray) \ + V(FastHoleySmiElementsAccessor, FAST_HOLEY_SMI_ELEMENTS, \ + FixedArray) \ + V(FastPackedObjectElementsAccessor, FAST_ELEMENTS, FixedArray) \ + V(FastHoleyObjectElementsAccessor, FAST_HOLEY_ELEMENTS, FixedArray) \ + V(FastPackedDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, \ + FixedDoubleArray) \ + V(FastHoleyDoubleElementsAccessor, FAST_HOLEY_DOUBLE_ELEMENTS, \ + FixedDoubleArray) \ V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, \ SeededNumberDictionary) \ V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS, \ @@ -139,8 +151,6 @@ void CopyObjectToObjectElements(FixedArray* from, uint32_t to_start, int raw_copy_size) { ASSERT(to->map() != HEAP->fixed_cow_array_map()); - ASSERT(from_kind == FAST_ELEMENTS || from_kind == FAST_SMI_ONLY_ELEMENTS); - ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS); int copy_size = raw_copy_size; if (raw_copy_size < 0) { ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || @@ -148,7 +158,7 @@ void CopyObjectToObjectElements(FixedArray* from, copy_size = Min(from->length() - from_start, to->length() - to_start); #ifdef DEBUG - // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already + // FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already // marked with the hole. 
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { for (int i = to_start + copy_size; i < to->length(); ++i) { @@ -160,12 +170,15 @@ void CopyObjectToObjectElements(FixedArray* from, ASSERT((copy_size + static_cast(to_start)) <= to->length() && (copy_size + static_cast(from_start)) <= from->length()); if (copy_size == 0) return; + ASSERT(IsFastSmiOrObjectElementsKind(from_kind)); + ASSERT(IsFastSmiOrObjectElementsKind(to_kind)); Address to_address = to->address() + FixedArray::kHeaderSize; Address from_address = from->address() + FixedArray::kHeaderSize; CopyWords(reinterpret_cast(to_address) + to_start, reinterpret_cast(from_address) + from_start, copy_size); - if (from_kind == FAST_ELEMENTS && to_kind == FAST_ELEMENTS) { + if (IsFastObjectElementsKind(from_kind) && + IsFastObjectElementsKind(to_kind)) { Heap* heap = from->GetHeap(); if (!heap->InNewSpace(to)) { heap->RecordWrites(to->address(), @@ -190,7 +203,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from, raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); copy_size = from->max_number_key() + 1 - from_start; #ifdef DEBUG - // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already + // Fast object arrays cannot be uninitialized. Ensure they are already // marked with the hole. if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { for (int i = to_start + copy_size; i < to->length(); ++i) { @@ -200,7 +213,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from, #endif } ASSERT(to != from); - ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS); + ASSERT(IsFastSmiOrObjectElementsKind(to_kind)); if (copy_size == 0) return; uint32_t to_length = to->length(); if (to_start + copy_size > to_length) { @@ -216,7 +229,7 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from, to->set_the_hole(i + to_start); } } - if (to_kind == FAST_ELEMENTS) { + if (IsFastObjectElementsKind(to_kind)) { if (!heap->InNewSpace(to)) { heap->RecordWrites(to->address(), to->OffsetOfElementAt(to_start), @@ -234,7 +247,7 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements( ElementsKind to_kind, uint32_t to_start, int raw_copy_size) { - ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS); + ASSERT(IsFastSmiOrObjectElementsKind(to_kind)); int copy_size = raw_copy_size; if (raw_copy_size < 0) { ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || @@ -242,7 +255,7 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements( copy_size = Min(from->length() - from_start, to->length() - to_start); #ifdef DEBUG - // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already + // FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already // marked with the hole. 
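One behavioral note on CopyObjectToObjectElements above: the write-barrier condition was generalized from exact FAST_ELEMENTS equality to the new predicate, but the reasoning is unchanged. As a recap sketch (RecordWrites arguments abbreviated to those visible in this hunk):

    // Smi-only arrays contain no heap pointers, so the remembered set only
    // needs updating when both kinds hold tagged objects and the target is
    // outside new space.
    if (IsFastObjectElementsKind(from_kind) &&
        IsFastObjectElementsKind(to_kind) &&
        !heap->InNewSpace(to)) {
      heap->RecordWrites(to->address(),
                         to->OffsetOfElementAt(to_start),
                         copy_size);
    }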
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { for (int i = to_start + copy_size; i < to->length(); ++i) { @@ -255,14 +268,14 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements( (copy_size + static_cast(from_start)) <= from->length()); if (copy_size == 0) return from; for (int i = 0; i < copy_size; ++i) { - if (to_kind == FAST_SMI_ONLY_ELEMENTS) { + if (IsFastSmiElementsKind(to_kind)) { UNIMPLEMENTED(); return Failure::Exception(); } else { MaybeObject* maybe_value = from->get(i + from_start); Object* value; - ASSERT(to_kind == FAST_ELEMENTS); - // Because FAST_DOUBLE_ELEMENTS -> FAST_ELEMENT allocate HeapObjects + ASSERT(IsFastObjectElementsKind(to_kind)); + // Because Double -> Object elements transitions allocate HeapObjects // iteratively, the allocate must succeed within a single GC cycle, // otherwise the retry after the GC will also fail. In order to ensure // that no GC is triggered, allocate HeapNumbers from old space if they @@ -404,6 +417,38 @@ class ElementsAccessorBase : public ElementsAccessor { virtual ElementsKind kind() const { return ElementsTraits::Kind; } + static void ValidateContents(JSObject* holder, int length) { + } + + static void ValidateImpl(JSObject* holder) { + FixedArrayBase* fixed_array_base = holder->elements(); + // When objects are first allocated, its elements are Failures. + if (fixed_array_base->IsFailure()) return; + if (!fixed_array_base->IsHeapObject()) return; + Map* map = fixed_array_base->map(); + // Arrays that have been shifted in place can't be verified. + Heap* heap = holder->GetHeap(); + if (map == heap->raw_unchecked_one_pointer_filler_map() || + map == heap->raw_unchecked_two_pointer_filler_map() || + map == heap->free_space_map()) { + return; + } + int length = 0; + if (holder->IsJSArray()) { + Object* length_obj = JSArray::cast(holder)->length(); + if (length_obj->IsSmi()) { + length = Smi::cast(length_obj)->value(); + } + } else { + length = fixed_array_base->length(); + } + ElementsAccessorSubclass::ValidateContents(holder, length); + } + + virtual void Validate(JSObject* holder) { + ElementsAccessorSubclass::ValidateImpl(holder); + } + static bool HasElementImpl(Object* receiver, JSObject* holder, uint32_t key, @@ -455,9 +500,10 @@ class ElementsAccessorBase : public ElementsAccessor { Object* length, BackingStore* backing_store); - MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(JSArray* array, - int capacity, - int length) { + MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength( + JSArray* array, + int capacity, + int length) { return ElementsAccessorSubclass::SetFastElementsCapacityAndLength( array, capacity, @@ -623,6 +669,7 @@ class FastElementsAccessor KindTraits>(name) {} protected: friend class ElementsAccessorBase; + friend class NonStrictArgumentsElementsAccessor; typedef typename KindTraits::BackingStore BackingStore; @@ -633,10 +680,21 @@ class FastElementsAccessor Object* length_object, uint32_t length) { uint32_t old_capacity = backing_store->length(); + Object* old_length = array->length(); + bool same_size = old_length->IsSmi() && + static_cast(Smi::cast(old_length)->value()) == length; + ElementsKind kind = array->GetElementsKind(); + + if (!same_size && IsFastElementsKind(kind) && + !IsFastHoleyElementsKind(kind)) { + kind = GetHoleyElementsKind(kind); + MaybeObject* maybe_obj = array->TransitionElementsKind(kind); + if (maybe_obj->IsFailure()) return maybe_obj; + } // Check whether the backing store should be shrunk. 
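CopyDoubleToObjectElements is the one copy routine that must allocate: each raw double becomes a fresh HeapNumber, and as the comment above notes, every allocation must succeed within a single GC cycle, hence the pretenuring into old space when the target is not in new space. A simplified model of the unboxed-to-boxed copy, with owning pointers standing in for HeapNumbers (the GC constraint itself cannot be reproduced outside V8):

    #include <memory>
    #include <vector>

    // FAST_DOUBLE_ELEMENTS storage holds raw doubles; object storage holds
    // tagged pointers. Converting therefore allocates one box ("HeapNumber")
    // per element.
    std::vector<std::unique_ptr<double> > BoxDoubleElements(
        const std::vector<double>& unboxed) {
      std::vector<std::unique_ptr<double> > boxed;
      boxed.reserve(unboxed.size());
      for (size_t i = 0; i < unboxed.size(); ++i) {
        boxed.push_back(std::unique_ptr<double>(new double(unboxed[i])));
      }
      return boxed;
    }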
if (length <= old_capacity) { - if (array->HasFastTypeElements()) { + if (array->HasFastSmiOrObjectElements()) { MaybeObject* maybe_obj = array->EnsureWritableFastElements(); if (!maybe_obj->To(&backing_store)) return maybe_obj; } @@ -668,39 +726,40 @@ class FastElementsAccessor MaybeObject* result = FastElementsAccessorSubclass:: SetFastElementsCapacityAndLength(array, new_capacity, length); if (result->IsFailure()) return result; + array->ValidateElements(); return length_object; } // Request conversion to slow elements. return array->GetHeap()->undefined_value(); } -}; - - -class FastObjectElementsAccessor - : public FastElementsAccessor, - kPointerSize> { - public: - explicit FastObjectElementsAccessor(const char* name) - : FastElementsAccessor, - kPointerSize>(name) {} static MaybeObject* DeleteCommon(JSObject* obj, - uint32_t key) { - ASSERT(obj->HasFastElements() || - obj->HasFastSmiOnlyElements() || + uint32_t key, + JSReceiver::DeleteMode mode) { + ASSERT(obj->HasFastSmiOrObjectElements() || + obj->HasFastDoubleElements() || obj->HasFastArgumentsElements()); + typename KindTraits::BackingStore* backing_store = + KindTraits::BackingStore::cast(obj->elements()); Heap* heap = obj->GetHeap(); - FixedArray* backing_store = FixedArray::cast(obj->elements()); if (backing_store->map() == heap->non_strict_arguments_elements_map()) { - backing_store = FixedArray::cast(backing_store->get(1)); + backing_store = + KindTraits::BackingStore::cast( + FixedArray::cast(backing_store)->get(1)); } else { - Object* writable; - MaybeObject* maybe = obj->EnsureWritableFastElements(); - if (!maybe->ToObject(&writable)) return maybe; - backing_store = FixedArray::cast(writable); + ElementsKind kind = KindTraits::Kind; + if (IsFastPackedElementsKind(kind)) { + MaybeObject* transitioned = + obj->TransitionElementsKind(GetHoleyElementsKind(kind)); + if (transitioned->IsFailure()) return transitioned; + } + if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) { + Object* writable; + MaybeObject* maybe = obj->EnsureWritableFastElements(); + if (!maybe->ToObject(&writable)) return maybe; + backing_store = KindTraits::BackingStore::cast(writable); + } } uint32_t length = static_cast( obj->IsJSArray() @@ -712,15 +771,14 @@ class FastObjectElementsAccessor // has too few used values, normalize it. // To avoid doing the check on every delete we require at least // one adjacent hole to the value being deleted. - Object* hole = heap->the_hole_value(); const int kMinLengthForSparsenessCheck = 64; if (backing_store->length() >= kMinLengthForSparsenessCheck && !heap->InNewSpace(backing_store) && - ((key > 0 && backing_store->get(key - 1) == hole) || - (key + 1 < length && backing_store->get(key + 1) == hole))) { + ((key > 0 && backing_store->is_the_hole(key - 1)) || + (key + 1 < length && backing_store->is_the_hole(key + 1)))) { int num_used = 0; for (int i = 0; i < backing_store->length(); ++i) { - if (backing_store->get(i) != hole) ++num_used; + if (!backing_store->is_the_hole(i)) ++num_used; // Bail out early if more than 1/4 is used. 
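Deletion in the fast accessors now has two visible steps: a packed array first transitions to its holey counterpart, then the deleted slot is overwritten with the hole; afterwards a cheap sparseness heuristic decides whether to normalize to dictionary elements. The heuristic in isolation (thresholds taken from the code above; the real code additionally skips backing stores in new space):

    #include <cstddef>
    #include <vector>

    const size_t kMinLengthForSparsenessCheck = 64;  // same bound as above

    // Decide whether a deletion at |key| should normalize the store to
    // dictionary (slow) elements: only consider largish stores, require an
    // adjacent hole so the scan is rarely taken, and bail out as soon as
    // more than a quarter of the slots are in use.
    bool ShouldNormalizeAfterDelete(const std::vector<bool>& is_hole,
                                    size_t key) {
      size_t length = is_hole.size();
      if (length < kMinLengthForSparsenessCheck) return false;
      bool adjacent_hole = (key > 0 && is_hole[key - 1]) ||
                           (key + 1 < length && is_hole[key + 1]);
      if (!adjacent_hole) return false;
      size_t num_used = 0;
      for (size_t i = 0; i < length; ++i) {
        if (!is_hole[i]) ++num_used;
        if (4 * num_used > length) return false;  // > 1/4 used: stay fast
      }
      return true;
    }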
if (4 * num_used > backing_store->length()) break; } @@ -733,27 +791,75 @@ class FastObjectElementsAccessor return heap->true_value(); } + virtual MaybeObject* Delete(JSObject* obj, + uint32_t key, + JSReceiver::DeleteMode mode) { + return DeleteCommon(obj, key, mode); + } + + static bool HasElementImpl( + Object* receiver, + JSObject* holder, + uint32_t key, + typename KindTraits::BackingStore* backing_store) { + if (key >= static_cast(backing_store->length())) { + return false; + } + return !backing_store->is_the_hole(key); + } + + static void ValidateContents(JSObject* holder, int length) { +#if DEBUG + FixedArrayBase* elements = holder->elements(); + Heap* heap = elements->GetHeap(); + Map* map = elements->map(); + ASSERT((IsFastSmiOrObjectElementsKind(KindTraits::Kind) && + (map == heap->fixed_array_map() || + map == heap->fixed_cow_array_map())) || + (IsFastDoubleElementsKind(KindTraits::Kind) == + ((map == heap->fixed_array_map() && length == 0) || + map == heap->fixed_double_array_map()))); + for (int i = 0; i < length; i++) { + typename KindTraits::BackingStore* backing_store = + KindTraits::BackingStore::cast(elements); + ASSERT((!IsFastSmiElementsKind(KindTraits::Kind) || + static_cast(backing_store->get(i))->IsSmi()) || + (IsFastHoleyElementsKind(KindTraits::Kind) == + backing_store->is_the_hole(i))); + } +#endif + } +}; + + +template +class FastSmiOrObjectElementsAccessor + : public FastElementsAccessor { + public: + explicit FastSmiOrObjectElementsAccessor(const char* name) + : FastElementsAccessor(name) {} + static MaybeObject* CopyElementsImpl(FixedArrayBase* from, uint32_t from_start, FixedArrayBase* to, ElementsKind to_kind, uint32_t to_start, int copy_size) { - switch (to_kind) { - case FAST_SMI_ONLY_ELEMENTS: - case FAST_ELEMENTS: { - CopyObjectToObjectElements( - FixedArray::cast(from), ElementsTraits::Kind, from_start, - FixedArray::cast(to), to_kind, to_start, copy_size); - return from; - } - case FAST_DOUBLE_ELEMENTS: - CopyObjectToDoubleElements( - FixedArray::cast(from), from_start, - FixedDoubleArray::cast(to), to_start, copy_size); - return from; - default: - UNREACHABLE(); + if (IsFastSmiOrObjectElementsKind(to_kind)) { + CopyObjectToObjectElements( + FixedArray::cast(from), KindTraits::Kind, from_start, + FixedArray::cast(to), to_kind, to_start, copy_size); + } else if (IsFastDoubleElementsKind(to_kind)) { + CopyObjectToDoubleElements( + FixedArray::cast(from), from_start, + FixedDoubleArray::cast(to), to_start, copy_size); + } else { + UNREACHABLE(); } return to->GetHeap()->undefined_value(); } @@ -762,51 +868,85 @@ class FastObjectElementsAccessor static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj, uint32_t capacity, uint32_t length) { - JSObject::SetFastElementsCapacityMode set_capacity_mode = - obj->HasFastSmiOnlyElements() - ? JSObject::kAllowSmiOnlyElements - : JSObject::kDontAllowSmiOnlyElements; + JSObject::SetFastElementsCapacitySmiMode set_capacity_mode = + obj->HasFastSmiElements() + ? 
JSObject::kAllowSmiElements + : JSObject::kDontAllowSmiElements; return obj->SetFastElementsCapacityAndLength(capacity, length, set_capacity_mode); } +}; - protected: - friend class FastElementsAccessor, - kPointerSize>; - virtual MaybeObject* Delete(JSObject* obj, - uint32_t key, - JSReceiver::DeleteMode mode) { - return DeleteCommon(obj, key); - } +class FastPackedSmiElementsAccessor + : public FastSmiOrObjectElementsAccessor< + FastPackedSmiElementsAccessor, + ElementsKindTraits > { + public: + explicit FastPackedSmiElementsAccessor(const char* name) + : FastSmiOrObjectElementsAccessor< + FastPackedSmiElementsAccessor, + ElementsKindTraits >(name) {} +}; + + +class FastHoleySmiElementsAccessor + : public FastSmiOrObjectElementsAccessor< + FastHoleySmiElementsAccessor, + ElementsKindTraits > { + public: + explicit FastHoleySmiElementsAccessor(const char* name) + : FastSmiOrObjectElementsAccessor< + FastHoleySmiElementsAccessor, + ElementsKindTraits >(name) {} +}; + + +class FastPackedObjectElementsAccessor + : public FastSmiOrObjectElementsAccessor< + FastPackedObjectElementsAccessor, + ElementsKindTraits > { + public: + explicit FastPackedObjectElementsAccessor(const char* name) + : FastSmiOrObjectElementsAccessor< + FastPackedObjectElementsAccessor, + ElementsKindTraits >(name) {} +}; + + +class FastHoleyObjectElementsAccessor + : public FastSmiOrObjectElementsAccessor< + FastHoleyObjectElementsAccessor, + ElementsKindTraits > { + public: + explicit FastHoleyObjectElementsAccessor(const char* name) + : FastSmiOrObjectElementsAccessor< + FastHoleyObjectElementsAccessor, + ElementsKindTraits >(name) {} }; +template class FastDoubleElementsAccessor - : public FastElementsAccessor, + : public FastElementsAccessor { public: explicit FastDoubleElementsAccessor(const char* name) - : FastElementsAccessor, + : FastElementsAccessor(name) {} static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj, uint32_t capacity, uint32_t length) { - return obj->SetFastDoubleElementsCapacityAndLength(capacity, length); + return obj->SetFastDoubleElementsCapacityAndLength(capacity, + length); } protected: - friend class ElementsAccessorBase >; - friend class FastElementsAccessor, - kDoubleSize>; - static MaybeObject* CopyElementsImpl(FixedArrayBase* from, uint32_t from_start, FixedArrayBase* to, @@ -814,12 +954,15 @@ class FastDoubleElementsAccessor uint32_t to_start, int copy_size) { switch (to_kind) { - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: return CopyDoubleToObjectElements( FixedDoubleArray::cast(from), from_start, FixedArray::cast(to), to_kind, to_start, copy_size); case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: CopyDoubleToDoubleElements(FixedDoubleArray::cast(from), from_start, FixedDoubleArray::cast(to), to_start, copy_size); @@ -829,26 +972,35 @@ class FastDoubleElementsAccessor } return to->GetHeap()->undefined_value(); } +}; - virtual MaybeObject* Delete(JSObject* obj, - uint32_t key, - JSReceiver::DeleteMode mode) { - int length = obj->IsJSArray() - ? 
Smi::cast(JSArray::cast(obj)->length())->value()
-        : FixedDoubleArray::cast(obj->elements())->length();
-    if (key < static_cast<uint32_t>(length)) {
-      FixedDoubleArray::cast(obj->elements())->set_the_hole(key);
-    }
-    return obj->GetHeap()->true_value();
-  }
-  static bool HasElementImpl(Object* receiver,
-                             JSObject* holder,
-                             uint32_t key,
-                             FixedDoubleArray* backing_store) {
-    return key < static_cast<uint32_t>(backing_store->length()) &&
-        !backing_store->is_the_hole(key);
-  }
+class FastPackedDoubleElementsAccessor
+    : public FastDoubleElementsAccessor<
+        FastPackedDoubleElementsAccessor,
+        ElementsKindTraits<FAST_DOUBLE_ELEMENTS> > {
+ public:
+  friend class ElementsAccessorBase<FastPackedDoubleElementsAccessor,
+                                    ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >;
+  explicit FastPackedDoubleElementsAccessor(const char* name)
+      : FastDoubleElementsAccessor<
+          FastPackedDoubleElementsAccessor,
+          ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >(name) {}
+};
+
+
+class FastHoleyDoubleElementsAccessor
+    : public FastDoubleElementsAccessor<
+        FastHoleyDoubleElementsAccessor,
+        ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> > {
+ public:
+  friend class ElementsAccessorBase<
+      FastHoleyDoubleElementsAccessor,
+      ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> >;
+  explicit FastHoleyDoubleElementsAccessor(const char* name)
+      : FastDoubleElementsAccessor<
+          FastHoleyDoubleElementsAccessor,
+          ElementsKindTraits<FAST_HOLEY_DOUBLE_ELEMENTS> >(name) {}
 };
@@ -1115,13 +1267,16 @@ class DictionaryElementsAccessor
                                        uint32_t to_start,
                                        int copy_size) {
     switch (to_kind) {
-      case FAST_SMI_ONLY_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
       case FAST_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
         CopyDictionaryToObjectElements(
             SeededNumberDictionary::cast(from), from_start,
             FixedArray::cast(to), to_kind, to_start, copy_size);
         return from;
       case FAST_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
         CopyDictionaryToDoubleElements(
             SeededNumberDictionary::cast(from), from_start,
             FixedDoubleArray::cast(to), to_start, copy_size);
@@ -1248,7 +1403,10 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
       if (arguments->IsDictionary()) {
         return DictionaryElementsAccessor::DeleteCommon(obj, key, mode);
       } else {
-        return FastObjectElementsAccessor::DeleteCommon(obj, key);
+        // It's difficult to access the version of DeleteCommon declared in
+        // the templatized super class; call the concrete implementation in
+        // the class for the most generalized ElementsKind subclass.
+        return FastHoleyObjectElementsAccessor::DeleteCommon(obj, key, mode);
       }
     }
     return obj->GetHeap()->true_value();
@@ -1312,7 +1470,7 @@ ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
       if (array->IsDictionary()) {
         return elements_accessors_[DICTIONARY_ELEMENTS];
       } else {
-        return elements_accessors_[FAST_ELEMENTS];
+        return elements_accessors_[FAST_HOLEY_ELEMENTS];
       }
     case EXTERNAL_BYTE_ARRAY_TYPE:
       return elements_accessors_[EXTERNAL_BYTE_ELEMENTS];
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 55d6fa5..822fca5 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -28,6 +28,7 @@
 #ifndef V8_ELEMENTS_H_
 #define V8_ELEMENTS_H_
 
+#include "elements-kind.h"
 #include "objects.h"
 #include "heap.h"
 #include "isolate.h"
@@ -45,6 +46,10 @@ class ElementsAccessor {
   virtual ElementsKind kind() const = 0;
   const char* name() const { return name_; }
 
+  // Checks the elements of an object for consistency, asserting when a problem
+  // is found.
+  virtual void Validate(JSObject* obj) = 0;
+
   // Returns true if a holder contains an element with the specified key
   // without iterating up the prototype chain.
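The new Validate/ValidateImpl/ValidateContents hooks give every accessor a debug-time consistency check, invoked through JSObject::ValidateElements() after mutations such as SetFastElementsCapacityAndLength. Condensed into a standalone sketch, the fast-kind invariants asserted above are:

    #include <cassert>
    #include <vector>

    struct Value {
      bool is_smi;
      bool is_hole;
    };

    // Condensed form of ValidateContents' assertions: a smi kind may hold
    // only smis (plus holes when holey), and a packed kind may hold no
    // holes at all.
    void ValidateFastElements(bool smi_kind, bool holey_kind,
                              const std::vector<Value>& elements) {
      for (size_t i = 0; i < elements.size(); ++i) {
        const Value& v = elements[i];
        if (smi_kind) assert(v.is_smi || (holey_kind && v.is_hole));
        if (!holey_kind) assert(!v.is_hole);
      }
    }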
The caller can optionally pass // in the backing store to use for the check, which must be compatible with diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc index 6bb7893..1c29ea1 100644 --- a/deps/v8/src/factory.cc +++ b/deps/v8/src/factory.cc @@ -775,7 +775,7 @@ Handle Factory::NewFunctionWithPrototype(Handle name, instance_size != JSObject::kHeaderSize) { Handle initial_map = NewMap(type, instance_size, - FAST_SMI_ONLY_ELEMENTS); + GetInitialFastElementsKind()); function->set_initial_map(*initial_map); initial_map->set_constructor(*function); } @@ -1013,10 +1013,11 @@ void Factory::EnsureCanContainHeapObjectElements(Handle array) { void Factory::EnsureCanContainElements(Handle array, Handle elements, + uint32_t length, EnsureElementsMode mode) { CALL_HEAP_FUNCTION_VOID( isolate(), - array->EnsureCanContainElements(*elements, mode)); + array->EnsureCanContainElements(*elements, length, mode)); } diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h index 06aad1b..a999b15 100644 --- a/deps/v8/src/factory.h +++ b/deps/v8/src/factory.h @@ -216,9 +216,10 @@ class Factory { Handle NewJSGlobalPropertyCell( Handle value); - Handle NewMap(InstanceType type, - int instance_size, - ElementsKind elements_kind = FAST_ELEMENTS); + Handle NewMap( + InstanceType type, + int instance_size, + ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND); Handle NewFunctionPrototype(Handle function); @@ -269,13 +270,14 @@ class Factory { Handle NewJSModule(); // JS arrays are pretenured when allocated by the parser. - Handle NewJSArray(int capacity, - ElementsKind elements_kind = FAST_ELEMENTS, - PretenureFlag pretenure = NOT_TENURED); + Handle NewJSArray( + int capacity, + ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND, + PretenureFlag pretenure = NOT_TENURED); Handle NewJSArrayWithElements( Handle elements, - ElementsKind elements_kind = FAST_ELEMENTS, + ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND, PretenureFlag pretenure = NOT_TENURED); void SetElementsCapacityAndLength(Handle array, @@ -287,6 +289,7 @@ class Factory { void EnsureCanContainHeapObjectElements(Handle array); void EnsureCanContainElements(Handle array, Handle elements, + uint32_t length, EnsureElementsMode mode); Handle NewJSProxy(Handle handler, Handle prototype); diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index 62a9782..fc9a1db 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -150,6 +150,7 @@ DEFINE_implication(harmony, harmony_collections) DEFINE_implication(harmony_modules, harmony_scoping) // Flags for experimental implementation features. 
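Factory::EnsureCanContainElements in the hunks above now threads an explicit length through, so only the used prefix of the backing store influences the kind an array must transition to. The underlying decision is a simple scan; a standalone sketch (the names here are hypothetical, not V8's):

    #include <cstddef>
    #include <vector>

    enum RequiredKind { REQUIRE_SMI, REQUIRE_DOUBLE, REQUIRE_OBJECT };

    struct Elem {
      bool is_smi;
      bool is_number;
    };

    // Least general fast kind that can hold the first |length| values;
    // slots past |length| are ignored, which is the point of the new
    // parameter.
    RequiredKind ComputeRequiredKind(const std::vector<Elem>& values,
                                     size_t length) {
      RequiredKind kind = REQUIRE_SMI;
      for (size_t i = 0; i < length && i < values.size(); ++i) {
        if (values[i].is_smi) continue;
        if (!values[i].is_number) return REQUIRE_OBJECT;  // forces object kind
        kind = REQUIRE_DOUBLE;
      }
      return kind;
    }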
+DEFINE_bool(packed_arrays, false, "optimizes arrays that have no holes") DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values") DEFINE_bool(clever_optimizations, true, @@ -197,6 +198,8 @@ DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining") DEFINE_bool(use_osr, true, "use on-stack replacement") DEFINE_bool(array_bounds_checks_elimination, true, "perform array bounds checks elimination") +DEFINE_bool(array_index_dehoisting, false, + "perform array index dehoisting") DEFINE_bool(trace_osr, false, "trace on-stack replacement") DEFINE_int(stress_runs, 0, "number of stress runs") diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc index b8794c0..9b1df4e 100644 --- a/deps/v8/src/full-codegen.cc +++ b/deps/v8/src/full-codegen.cc @@ -314,7 +314,8 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) { Code::Flags flags = Code::ComputeFlags(Code::FUNCTION); Handle code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info); code->set_optimizable(info->IsOptimizable() && - !info->function()->flags()->Contains(kDontOptimize)); + !info->function()->flags()->Contains(kDontOptimize) && + info->function()->scope()->AllowsLazyRecompilation()); cgen.PopulateDeoptimizationData(code); cgen.PopulateTypeFeedbackInfo(code); cgen.PopulateTypeFeedbackCells(code); diff --git a/deps/v8/src/func-name-inferrer.h b/deps/v8/src/func-name-inferrer.h index 1a57268..ccd962a 100644 --- a/deps/v8/src/func-name-inferrer.h +++ b/deps/v8/src/func-name-inferrer.h @@ -88,6 +88,8 @@ class FuncNameInferrer : public ZoneObject { void Leave() { ASSERT(IsOpen()); names_stack_.Rewind(entries_stack_.RemoveLast()); + if (entries_stack_.is_empty()) + funcs_to_infer_.Clear(); } private: diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h index e12895a..9d79db2 100644 --- a/deps/v8/src/heap-inl.h +++ b/deps/v8/src/heap-inl.h @@ -595,12 +595,24 @@ void ExternalStringTable::Iterate(ObjectVisitor* v) { void ExternalStringTable::Verify() { #ifdef DEBUG for (int i = 0; i < new_space_strings_.length(); ++i) { - ASSERT(heap_->InNewSpace(new_space_strings_[i])); - ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_the_hole_value()); + Object* obj = Object::cast(new_space_strings_[i]); + // TODO(yangguo): check that the object is indeed an external string. + ASSERT(heap_->InNewSpace(obj)); + ASSERT(obj != HEAP->raw_unchecked_the_hole_value()); + if (obj->IsExternalAsciiString()) { + ExternalAsciiString* string = ExternalAsciiString::cast(obj); + ASSERT(String::IsAscii(string->GetChars(), string->length())); + } } for (int i = 0; i < old_space_strings_.length(); ++i) { - ASSERT(!heap_->InNewSpace(old_space_strings_[i])); - ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_the_hole_value()); + Object* obj = Object::cast(old_space_strings_[i]); + // TODO(yangguo): check that the object is indeed an external string. + ASSERT(!heap_->InNewSpace(obj)); + ASSERT(obj != HEAP->raw_unchecked_the_hole_value()); + if (obj->IsExternalAsciiString()) { + ExternalAsciiString* string = ExternalAsciiString::cast(obj); + ASSERT(String::IsAscii(string->GetChars(), string->length())); + } } #endif } diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index d3c7f0a..a224e2b 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -2469,7 +2469,7 @@ bool Heap::CreateApiObjects() { // bottleneck to trap the Smi-only -> fast elements transition, and there // appears to be no benefit for optimize this case. 
Map* new_neander_map = Map::cast(obj); - new_neander_map->set_elements_kind(FAST_ELEMENTS); + new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND); set_neander_map(new_neander_map); { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map()); @@ -3050,6 +3050,7 @@ MaybeObject* Heap::AllocateJSMessageObject(String* type, } JSMessageObject* message = JSMessageObject::cast(result); message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER); + message->initialize_elements(); message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER); message->set_type(type); message->set_arguments(arguments); @@ -3326,6 +3327,8 @@ MaybeObject* Heap::AllocateExternalStringFromAscii( return Failure::OutOfMemoryException(); } + ASSERT(String::IsAscii(resource->data(), static_cast(length))); + Map* map = external_ascii_string_map(); Object* result; { MaybeObject* maybe_result = Allocate(map, NEW_SPACE); @@ -3751,7 +3754,7 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) { // Check the state of the object ASSERT(JSObject::cast(result)->HasFastProperties()); - ASSERT(JSObject::cast(result)->HasFastElements()); + ASSERT(JSObject::cast(result)->HasFastObjectElements()); return result; } @@ -3796,7 +3799,7 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) { map->set_inobject_properties(in_object_properties); map->set_unused_property_fields(in_object_properties); map->set_prototype(prototype); - ASSERT(map->has_fast_elements()); + ASSERT(map->has_fast_object_elements()); // If the function has only simple this property assignments add // field descriptors for these to the initial map as the object @@ -3913,8 +3916,7 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) { InitializeJSObjectFromMap(JSObject::cast(obj), FixedArray::cast(properties), map); - ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() || - JSObject::cast(obj)->HasFastElements()); + ASSERT(JSObject::cast(obj)->HasFastSmiOrObjectElements()); return obj; } @@ -3959,6 +3961,9 @@ MaybeObject* Heap::AllocateJSArrayAndStorage( ArrayStorageAllocationMode mode, PretenureFlag pretenure) { ASSERT(capacity >= length); + if (length != 0 && mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) { + elements_kind = GetHoleyElementsKind(elements_kind); + } MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure); JSArray* array; if (!maybe_array->To(&array)) return maybe_array; @@ -3979,8 +3984,7 @@ MaybeObject* Heap::AllocateJSArrayAndStorage( maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity); } } else { - ASSERT(elements_kind == FAST_ELEMENTS || - elements_kind == FAST_SMI_ONLY_ELEMENTS); + ASSERT(IsFastSmiOrObjectElementsKind(elements_kind)); if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) { maybe_elms = AllocateUninitializedFixedArray(capacity); } else { @@ -4006,6 +4010,7 @@ MaybeObject* Heap::AllocateJSArrayWithElements( array->set_elements(elements); array->set_length(Smi::FromInt(elements->length())); + array->ValidateElements(); return array; } @@ -4490,6 +4495,16 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) { String::cast(result)->set_length(length); String::cast(result)->set_hash_field(String::kEmptyHashField); ASSERT_EQ(size, HeapObject::cast(result)->Size()); + +#ifdef DEBUG + if (FLAG_verify_heap) { + // Initialize string's content to ensure ASCII-ness (character range 0-127) + // as required when verifying the heap. 
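In AllocateJSArrayAndStorage above, requesting hole-initialized storage of nonzero length immediately switches to the holey variant of the requested kind, since a packed map would violate its no-holes invariant from the moment of allocation. As a sketch (reusing the illustrative ElementsKind helpers from the elements-kind.h notes earlier):

    enum ArrayStorageAllocationMode {
      DONT_INITIALIZE_ARRAY_ELEMENTS,
      INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
    };

    // Reuses ElementsKind and GetHoleyElementsKind() from the lattice
    // sketch above. A nonzero-length array whose storage is pre-filled
    // with holes cannot legally carry a packed map.
    ElementsKind KindForNewStorage(ElementsKind requested_kind, int length,
                                   ArrayStorageAllocationMode mode) {
      if (length != 0 && mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) {
        return GetHoleyElementsKind(requested_kind);
      }
      return requested_kind;
    }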
+ char* dest = SeqAsciiString::cast(result)->GetChars(); + memset(dest, 0x0F, length * kCharSize); + } +#endif // DEBUG + return result; } @@ -4536,13 +4551,13 @@ MaybeObject* Heap::AllocateJSArray( Context* global_context = isolate()->context()->global_context(); JSFunction* array_function = global_context->array_function(); Map* map = array_function->initial_map(); - if (elements_kind == FAST_DOUBLE_ELEMENTS) { - map = Map::cast(global_context->double_js_array_map()); - } else if (elements_kind == FAST_ELEMENTS || !FLAG_smi_only_arrays) { - map = Map::cast(global_context->object_js_array_map()); - } else { - ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS); - ASSERT(map == global_context->smi_js_array_map()); + Object* maybe_map_array = global_context->js_array_maps(); + if (!maybe_map_array->IsUndefined()) { + Object* maybe_transitioned_map = + FixedArray::cast(maybe_map_array)->get(elements_kind); + if (!maybe_transitioned_map->IsUndefined()) { + map = Map::cast(maybe_transitioned_map); + } } return AllocateJSObjectFromMap(map, pretenure); @@ -4827,9 +4842,7 @@ MaybeObject* Heap::AllocateGlobalContext() { } Context* context = reinterpret_cast(result); context->set_map_no_write_barrier(global_context_map()); - context->set_smi_js_array_map(undefined_value()); - context->set_double_js_array_map(undefined_value()); - context->set_object_js_array_map(undefined_value()); + context->set_js_array_maps(undefined_value()); ASSERT(context->IsGlobalContext()); ASSERT(result->IsContext()); return result; diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index beb1bc5..55e7135 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -621,7 +621,7 @@ class Heap { MUST_USE_RESULT MaybeObject* AllocateMap( InstanceType instance_type, int instance_size, - ElementsKind elements_kind = FAST_ELEMENTS); + ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND); // Allocates a partial map for bootstrapping. 
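AllocateJSArray above replaces the three dedicated per-context array-map fields with a single js_array_maps table indexed by ElementsKind, which scales to the six fast kinds. The lookup is a guarded table read with the function's initial map as fallback; roughly (kFastKinds and the null convention are stand-ins for V8's FixedArray of maps with undefined entries):

    #include <array>

    struct Map;  // opaque stand-in for v8::internal::Map

    const int kFastKinds = 6;  // number of fast ElementsKinds after this patch

    // One transitioned initial map per fast kind, materialized lazily;
    // absent entries fall back to the array function's initial map, as
    // AllocateJSArray does above.
    Map* SelectArrayMap(const std::array<Map*, kFastKinds>& js_array_maps,
                        int elements_kind, Map* initial_map) {
      Map* transitioned = js_array_maps[elements_kind];
      return transitioned != nullptr ? transitioned : initial_map;
    }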
MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type, diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc index c66a7a1..57a1862 100644 --- a/deps/v8/src/hydrogen-instructions.cc +++ b/deps/v8/src/hydrogen-instructions.cc @@ -1685,6 +1685,9 @@ void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) { stream->Add("["); key()->PrintNameTo(stream); stream->Add("]"); + if (hole_check_mode_ == PERFORM_HOLE_CHECK) { + stream->Add(" check_hole"); + } } @@ -1736,7 +1739,7 @@ HValue* HLoadKeyedGeneric::Canonicalize() { HInstruction* index = new(block()->zone()) HLoadKeyedFastElement( index_cache, key_load->key(), - HLoadKeyedFastElement::OMIT_HOLE_CHECK); + OMIT_HOLE_CHECK); HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex( object(), index); map_check->InsertBefore(this); @@ -1784,8 +1787,11 @@ void HLoadKeyedSpecializedArrayElement::PrintDataTo( stream->Add("pixel"); break; case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -1882,9 +1888,12 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo( case EXTERNAL_PIXEL_ELEMENTS: stream->Add("pixel"); break; - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -1899,7 +1908,13 @@ void HStoreKeyedSpecializedArrayElement::PrintDataTo( void HTransitionElementsKind::PrintDataTo(StringStream* stream) { object()->PrintNameTo(stream); - stream->Add(" %p -> %p", *original_map(), *transitioned_map()); + ElementsKind from_kind = original_map()->elements_kind(); + ElementsKind to_kind = transitioned_map()->elements_kind(); + stream->Add(" %p [%s] -> %p [%s]", + *original_map(), + ElementsAccessor::ForKind(from_kind)->name(), + *transitioned_map(), + ElementsAccessor::ForKind(to_kind)->name()); } diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h index 9d262fc..c68befd 100644 --- a/deps/v8/src/hydrogen-instructions.h +++ b/deps/v8/src/hydrogen-instructions.h @@ -2083,28 +2083,21 @@ class HCheckMaps: public HTemplateInstruction<2> { HCheckMaps* check_map = new HCheckMaps(object, map); SmallMapList* map_set = check_map->map_set(); - // If the map to check has the untransitioned elements, it can be hoisted - // above TransitionElements instructions. - if (map->has_fast_smi_only_elements()) { - check_map->ClearGVNFlag(kDependsOnElementsKind); - } - - Map* transitioned_fast_element_map = - map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL); - ASSERT(transitioned_fast_element_map == NULL || - map->elements_kind() != FAST_ELEMENTS); - if (transitioned_fast_element_map != NULL) { - map_set->Add(Handle(transitioned_fast_element_map)); - } - Map* transitioned_double_map = - map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL); - ASSERT(transitioned_double_map == NULL || - map->elements_kind() == FAST_SMI_ONLY_ELEMENTS); - if (transitioned_double_map != NULL) { - map_set->Add(Handle(transitioned_double_map)); - } + // Since transitioned elements maps of the initial map don't fail the map + // check, the CheckMaps instruction doesn't need to depend on ElementsKinds. 
+ check_map->ClearGVNFlag(kDependsOnElementsKind); + + ElementsKind kind = map->elements_kind(); + bool packed = IsFastPackedElementsKind(kind); + while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) { + kind = GetNextMoreGeneralFastElementsKind(kind, packed); + Map* transitioned_map = + map->LookupElementsTransitionMap(kind, NULL); + if (transitioned_map) { + map_set->Add(Handle(transitioned_map)); + } + }; map_set->Sort(); - return check_map; } @@ -3946,15 +3939,28 @@ class HLoadFunctionPrototype: public HUnaryOperation { virtual bool DataEquals(HValue* other) { return true; } }; - -class HLoadKeyedFastElement: public HTemplateInstruction<2> { +class ArrayInstructionInterface { public: - enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK }; + virtual HValue* GetKey() = 0; + virtual void SetKey(HValue* key) = 0; + virtual void SetIndexOffset(uint32_t index_offset) = 0; + virtual bool IsDehoisted() = 0; + virtual void SetDehoisted(bool is_dehoisted) = 0; + virtual ~ArrayInstructionInterface() { }; +}; + +enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK }; + +class HLoadKeyedFastElement + : public HTemplateInstruction<2>, public ArrayInstructionInterface { + public: HLoadKeyedFastElement(HValue* obj, HValue* key, HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK) - : hole_check_mode_(hole_check_mode) { + : hole_check_mode_(hole_check_mode), + index_offset_(0), + is_dehoisted_(false) { SetOperandAt(0, obj); SetOperandAt(1, key); set_representation(Representation::Tagged()); @@ -3964,6 +3970,12 @@ class HLoadKeyedFastElement: public HTemplateInstruction<2> { HValue* object() { return OperandAt(0); } HValue* key() { return OperandAt(1); } + uint32_t index_offset() { return index_offset_; } + void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; } + HValue* GetKey() { return key(); } + void SetKey(HValue* key) { SetOperandAt(1, key); } + bool IsDehoisted() { return is_dehoisted_; } + void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; } virtual Representation RequiredInputRepresentation(int index) { // The key is supposed to be Integer32. 
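ArrayInstructionInterface, defined above, is the small mixin that lets a single pass treat every keyed load and store uniformly regardless of its template base; each concrete instruction stores the folded-out constant in index_offset_ and flags itself with is_dehoisted_. A reduced standalone version of the pattern (int* stands in for HValue*):

    #include <stdint.h>

    class ArrayInstructionInterface {
     public:
      virtual int* GetKey() = 0;
      virtual void SetKey(int* key) = 0;
      virtual void SetIndexOffset(uint32_t index_offset) = 0;
      virtual bool IsDehoisted() = 0;
      virtual void SetDehoisted(bool is_dehoisted) = 0;
      virtual ~ArrayInstructionInterface() { }
    };

    class KeyedLoad : public ArrayInstructionInterface {
     public:
      explicit KeyedLoad(int* key)
          : key_(key), index_offset_(0), is_dehoisted_(false) { }
      virtual int* GetKey() { return key_; }
      virtual void SetKey(int* key) { key_ = key; }
      virtual void SetIndexOffset(uint32_t index_offset) {
        index_offset_ = index_offset;
      }
      virtual bool IsDehoisted() { return is_dehoisted_; }
      virtual void SetDehoisted(bool is_dehoisted) {
        is_dehoisted_ = is_dehoisted;
      }

     private:
      int* key_;
      uint32_t index_offset_;  // constant index part, applied by codegen
      bool is_dehoisted_;
    };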
@@ -3982,26 +3994,43 @@ class HLoadKeyedFastElement: public HTemplateInstruction<2> { virtual bool DataEquals(HValue* other) { if (!other->IsLoadKeyedFastElement()) return false; HLoadKeyedFastElement* other_load = HLoadKeyedFastElement::cast(other); + if (is_dehoisted_ && index_offset_ != other_load->index_offset_) + return false; return hole_check_mode_ == other_load->hole_check_mode_; } private: HoleCheckMode hole_check_mode_; + uint32_t index_offset_; + bool is_dehoisted_; }; -class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> { +class HLoadKeyedFastDoubleElement + : public HTemplateInstruction<2>, public ArrayInstructionInterface { public: - HLoadKeyedFastDoubleElement(HValue* elements, HValue* key) { - SetOperandAt(0, elements); - SetOperandAt(1, key); - set_representation(Representation::Double()); + HLoadKeyedFastDoubleElement( + HValue* elements, + HValue* key, + HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK) + : index_offset_(0), + is_dehoisted_(false), + hole_check_mode_(hole_check_mode) { + SetOperandAt(0, elements); + SetOperandAt(1, key); + set_representation(Representation::Double()); SetGVNFlag(kDependsOnDoubleArrayElements); SetFlag(kUseGVN); - } + } HValue* elements() { return OperandAt(0); } HValue* key() { return OperandAt(1); } + uint32_t index_offset() { return index_offset_; } + void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; } + HValue* GetKey() { return key(); } + void SetKey(HValue* key) { SetOperandAt(1, key); } + bool IsDehoisted() { return is_dehoisted_; } + void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; } virtual Representation RequiredInputRepresentation(int index) { // The key is supposed to be Integer32. @@ -4010,21 +4039,38 @@ class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> { : Representation::Integer32(); } + bool RequiresHoleCheck() { + return hole_check_mode_ == PERFORM_HOLE_CHECK; + } + virtual void PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement) protected: - virtual bool DataEquals(HValue* other) { return true; } + virtual bool DataEquals(HValue* other) { + if (!other->IsLoadKeyedFastDoubleElement()) return false; + HLoadKeyedFastDoubleElement* other_load = + HLoadKeyedFastDoubleElement::cast(other); + return hole_check_mode_ == other_load->hole_check_mode_; + } + + private: + uint32_t index_offset_; + bool is_dehoisted_; + HoleCheckMode hole_check_mode_; }; -class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> { +class HLoadKeyedSpecializedArrayElement + : public HTemplateInstruction<2>, public ArrayInstructionInterface { public: HLoadKeyedSpecializedArrayElement(HValue* external_elements, HValue* key, ElementsKind elements_kind) - : elements_kind_(elements_kind) { + : elements_kind_(elements_kind), + index_offset_(0), + is_dehoisted_(false) { SetOperandAt(0, external_elements); SetOperandAt(1, key); if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || @@ -4052,6 +4098,12 @@ class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> { HValue* external_pointer() { return OperandAt(0); } HValue* key() { return OperandAt(1); } ElementsKind elements_kind() const { return elements_kind_; } + uint32_t index_offset() { return index_offset_; } + void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; } + HValue* GetKey() { return key(); } + void SetKey(HValue* key) { SetOperandAt(1, key); } + bool IsDehoisted() { return is_dehoisted_; } + void SetDehoisted(bool is_dehoisted) { 
is_dehoisted_ = is_dehoisted; } virtual Range* InferRange(Zone* zone); @@ -4067,6 +4119,8 @@ class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> { private: ElementsKind elements_kind_; + uint32_t index_offset_; + bool is_dehoisted_; }; @@ -4188,11 +4242,12 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> { }; -class HStoreKeyedFastElement: public HTemplateInstruction<3> { +class HStoreKeyedFastElement + : public HTemplateInstruction<3>, public ArrayInstructionInterface { public: HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val, ElementsKind elements_kind = FAST_ELEMENTS) - : elements_kind_(elements_kind) { + : elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) { SetOperandAt(0, obj); SetOperandAt(1, key); SetOperandAt(2, val); @@ -4210,8 +4265,14 @@ class HStoreKeyedFastElement: public HTemplateInstruction<3> { HValue* key() { return OperandAt(1); } HValue* value() { return OperandAt(2); } bool value_is_smi() { - return elements_kind_ == FAST_SMI_ONLY_ELEMENTS; + return IsFastSmiElementsKind(elements_kind_); } + uint32_t index_offset() { return index_offset_; } + void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; } + HValue* GetKey() { return key(); } + void SetKey(HValue* key) { SetOperandAt(1, key); } + bool IsDehoisted() { return is_dehoisted_; } + void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; } bool NeedsWriteBarrier() { if (value_is_smi()) { @@ -4227,14 +4288,18 @@ class HStoreKeyedFastElement: public HTemplateInstruction<3> { private: ElementsKind elements_kind_; + uint32_t index_offset_; + bool is_dehoisted_; }; -class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> { +class HStoreKeyedFastDoubleElement + : public HTemplateInstruction<3>, public ArrayInstructionInterface { public: HStoreKeyedFastDoubleElement(HValue* elements, HValue* key, - HValue* val) { + HValue* val) + : index_offset_(0), is_dehoisted_(false) { SetOperandAt(0, elements); SetOperandAt(1, key); SetOperandAt(2, val); @@ -4254,6 +4319,12 @@ class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> { HValue* elements() { return OperandAt(0); } HValue* key() { return OperandAt(1); } HValue* value() { return OperandAt(2); } + uint32_t index_offset() { return index_offset_; } + void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; } + HValue* GetKey() { return key(); } + void SetKey(HValue* key) { SetOperandAt(1, key); } + bool IsDehoisted() { return is_dehoisted_; } + void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; } bool NeedsWriteBarrier() { return StoringValueNeedsWriteBarrier(value()); @@ -4264,16 +4335,21 @@ class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> { virtual void PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement) + + private: + uint32_t index_offset_; + bool is_dehoisted_; }; -class HStoreKeyedSpecializedArrayElement: public HTemplateInstruction<3> { +class HStoreKeyedSpecializedArrayElement + : public HTemplateInstruction<3>, public ArrayInstructionInterface { public: HStoreKeyedSpecializedArrayElement(HValue* external_elements, HValue* key, HValue* val, ElementsKind elements_kind) - : elements_kind_(elements_kind) { + : elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) { SetGVNFlag(kChangesSpecializedArrayElements); SetOperandAt(0, external_elements); SetOperandAt(1, key); @@ -4301,11 +4377,19 @@ class HStoreKeyedSpecializedArrayElement: public 
HTemplateInstruction<3> { HValue* key() { return OperandAt(1); } HValue* value() { return OperandAt(2); } ElementsKind elements_kind() const { return elements_kind_; } + uint32_t index_offset() { return index_offset_; } + void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; } + HValue* GetKey() { return key(); } + void SetKey(HValue* key) { SetOperandAt(1, key); } + bool IsDehoisted() { return is_dehoisted_; } + void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; } DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement) private: ElementsKind elements_kind_; + uint32_t index_offset_; + bool is_dehoisted_; }; @@ -4352,9 +4436,19 @@ class HTransitionElementsKind: public HTemplateInstruction<1> { transitioned_map_(transitioned_map) { SetOperandAt(0, object); SetFlag(kUseGVN); + // Don't set GVN DependOn flags here. That would defeat GVN's detection of + // congruent HTransitionElementsKind instructions. Instruction hoisting + // handles HTransitionElementsKind instruction specially, explicitly adding + // DependsOn flags during its dependency calculations. SetGVNFlag(kChangesElementsKind); - SetGVNFlag(kChangesElementsPointer); - SetGVNFlag(kChangesNewSpacePromotion); + if (original_map->has_fast_double_elements()) { + SetGVNFlag(kChangesElementsPointer); + SetGVNFlag(kChangesNewSpacePromotion); + } + if (transitioned_map->has_fast_double_elements()) { + SetGVNFlag(kChangesElementsPointer); + SetGVNFlag(kChangesNewSpacePromotion); + } set_representation(Representation::Tagged()); } @@ -4592,7 +4686,7 @@ class HArrayLiteral: public HMaterializedLiteral<1> { HValue* context() { return OperandAt(0); } ElementsKind boilerplate_elements_kind() const { if (!boilerplate_object_->IsJSObject()) { - return FAST_ELEMENTS; + return TERMINAL_FAST_ELEMENTS_KIND; } return Handle::cast(boilerplate_object_)->GetElementsKind(); } diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc index 3be001e..b9ad8af 100644 --- a/deps/v8/src/hydrogen.cc +++ b/deps/v8/src/hydrogen.cc @@ -1709,23 +1709,23 @@ void HGlobalValueNumberer::ProcessLoopBlock( bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags); if (instr->IsTransitionElementsKind()) { // It's possible to hoist transitions out of a loop as long as the - // hoisting wouldn't move the transition past a DependsOn of one of it's - // changes or any instructions that might change an objects map or - // elements contents. - GVNFlagSet changes = instr->ChangesFlags(); + // hoisting wouldn't move the transition past an instruction that has a + // DependsOn flag for anything it changes. GVNFlagSet hoist_depends_blockers = - HValue::ConvertChangesToDependsFlags(changes); - // In addition to not hoisting transitions above other instructions that - // change dependencies that the transition changes, it must not be - // hoisted above map changes and stores to an elements backing store - // that the transition might change. - GVNFlagSet hoist_change_blockers = changes; - hoist_change_blockers.Add(kChangesMaps); + HValue::ConvertChangesToDependsFlags(instr->ChangesFlags()); + + // In addition, the transition must not be hoisted above elements kind + // changes, or if the transition is destructive to the elements buffer, + // changes to array pointer or array contents. 
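HTransitionElementsKind above only advertises kChangesElementsPointer and kChangesNewSpacePromotion when a double kind is involved, because only those transitions must reallocate and re-box the backing store; a smi-to-object or packed-to-holey transition is a pure map change. The same from/to test drives the hoisting blockers below. As a predicate (sketch, reusing the illustrative ElementsKind enum from the elements-kind.h notes):

    // True when a transition must copy into a new backing store, i.e. when
    // the element representation changes between raw doubles and tagged
    // pointers in either direction.
    bool TransitionReallocatesStore(ElementsKind from_kind,
                                    ElementsKind to_kind) {
      bool from_double = from_kind == FAST_DOUBLE_ELEMENTS ||
                         from_kind == FAST_HOLEY_DOUBLE_ELEMENTS;
      bool to_double = to_kind == FAST_DOUBLE_ELEMENTS ||
                       to_kind == FAST_HOLEY_DOUBLE_ELEMENTS;
      return from_double || to_double;
    }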
+ GVNFlagSet hoist_change_blockers; + hoist_change_blockers.Add(kChangesElementsKind); HTransitionElementsKind* trans = HTransitionElementsKind::cast(instr); if (trans->original_map()->has_fast_double_elements()) { + hoist_change_blockers.Add(kChangesElementsPointer); hoist_change_blockers.Add(kChangesDoubleArrayElements); } if (trans->transitioned_map()->has_fast_double_elements()) { + hoist_change_blockers.Add(kChangesElementsPointer); hoist_change_blockers.Add(kChangesArrayElements); } if (FLAG_trace_gvn) { @@ -2758,6 +2758,7 @@ HGraph* HGraphBuilder::CreateGraph() { sce.Process(); graph()->EliminateRedundantBoundsChecks(); + graph()->DehoistSimpleArrayIndexComputations(); return graph(); } @@ -3016,7 +3017,6 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb, HBoundsCheck* check = HBoundsCheck::cast(i); check->ReplaceAllUsesWith(check->index()); - isolate()->counters()->array_bounds_checks_seen()->Increment(); if (!FLAG_array_bounds_checks_elimination) continue; int32_t offset; @@ -3035,10 +3035,8 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb, *data_p = bb_data_list; } else if (data->OffsetIsCovered(offset)) { check->DeleteAndReplaceWith(NULL); - isolate()->counters()->array_bounds_checks_removed()->Increment(); } else if (data->BasicBlock() == bb) { data->CoverCheck(check, offset); - isolate()->counters()->array_bounds_checks_removed()->Increment(); } else { int32_t new_lower_offset = offset < data->LowerOffset() ? offset @@ -3082,6 +3080,93 @@ void HGraph::EliminateRedundantBoundsChecks() { } +static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) { + HValue* index = array_operation->GetKey(); + + HConstant* constant; + HValue* subexpression; + int32_t sign; + if (index->IsAdd()) { + sign = 1; + HAdd* add = HAdd::cast(index); + if (add->left()->IsConstant()) { + subexpression = add->right(); + constant = HConstant::cast(add->left()); + } else if (add->right()->IsConstant()) { + subexpression = add->left(); + constant = HConstant::cast(add->right()); + } else { + return; + } + } else if (index->IsSub()) { + sign = -1; + HSub* sub = HSub::cast(index); + if (sub->left()->IsConstant()) { + subexpression = sub->right(); + constant = HConstant::cast(sub->left()); + } else if (sub->right()->IsConstant()) { + subexpression = sub->left(); + constant = HConstant::cast(sub->right()); + } return; + } else { + return; + } + + if (!constant->HasInteger32Value()) return; + int32_t value = constant->Integer32Value() * sign; + // We limit offset values to 30 bits because we want to avoid the risk of + // overflows when the offset is added to the object header size. 
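DehoistArrayIndex above folds a constant addend out of the key: a load of a[i + 4] becomes a load keyed by i with index offset 4, capped below 2^30 so adding the scaled offset to the object header size cannot overflow. Note that the subtraction branch exits unconditionally (the "} return;" after its if/else-if chain), so only addition patterns are ever dehoisted; that is also the safe subset, since folding the constant out of c - i would not be index-equivalent. A standalone sketch of the addition case (data structures hypothetical):

    #include <stdint.h>

    // A key is either a bare index variable or "subexpression + constant".
    struct KeyExpr {
      bool is_add_with_constant;
      int32_t constant;
      KeyExpr* subexpression;
    };

    struct ArrayOp {  // plays the ArrayInstructionInterface role
      KeyExpr* key;
      uint32_t index_offset;
      bool is_dehoisted;
    };

    // Fold "i + c" into the operation's index offset, leaving "i" as the
    // key; reject offsets that are negative or at least 2^30, as above.
    void DehoistArrayIndex(ArrayOp* op) {
      KeyExpr* index = op->key;
      if (!index->is_add_with_constant) return;
      int32_t value = index->constant;
      if (value < 0 || value >= (1 << 30)) return;
      op->key = index->subexpression;
      op->index_offset = static_cast<uint32_t>(value);
      op->is_dehoisted = true;
    }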
+  if (value >= 1 << 30 || value < 0) return;
+  array_operation->SetKey(subexpression);
+  if (index->HasNoUses()) {
+    index->DeleteAndReplaceWith(NULL);
+  }
+  ASSERT(value >= 0);
+  array_operation->SetIndexOffset(static_cast<uint32_t>(value));
+  array_operation->SetDehoisted(true);
+}
+
+
+void HGraph::DehoistSimpleArrayIndexComputations() {
+  if (!FLAG_array_index_dehoisting) return;
+
+  HPhase phase("H_Dehoist index computations", this);
+  for (int i = 0; i < blocks()->length(); ++i) {
+    for (HInstruction* instr = blocks()->at(i)->first();
+        instr != NULL;
+        instr = instr->next()) {
+      ArrayInstructionInterface* array_instruction = NULL;
+      if (instr->IsLoadKeyedFastElement()) {
+        HLoadKeyedFastElement* op = HLoadKeyedFastElement::cast(instr);
+        array_instruction = static_cast<ArrayInstructionInterface*>(op);
+      } else if (instr->IsLoadKeyedFastDoubleElement()) {
+        HLoadKeyedFastDoubleElement* op =
+            HLoadKeyedFastDoubleElement::cast(instr);
+        array_instruction = static_cast<ArrayInstructionInterface*>(op);
+      } else if (instr->IsLoadKeyedSpecializedArrayElement()) {
+        HLoadKeyedSpecializedArrayElement* op =
+            HLoadKeyedSpecializedArrayElement::cast(instr);
+        array_instruction = static_cast<ArrayInstructionInterface*>(op);
+      } else if (instr->IsStoreKeyedFastElement()) {
+        HStoreKeyedFastElement* op = HStoreKeyedFastElement::cast(instr);
+        array_instruction = static_cast<ArrayInstructionInterface*>(op);
+      } else if (instr->IsStoreKeyedFastDoubleElement()) {
+        HStoreKeyedFastDoubleElement* op =
+            HStoreKeyedFastDoubleElement::cast(instr);
+        array_instruction = static_cast<ArrayInstructionInterface*>(op);
+      } else if (instr->IsStoreKeyedSpecializedArrayElement()) {
+        HStoreKeyedSpecializedArrayElement* op =
+            HStoreKeyedSpecializedArrayElement::cast(instr);
+        array_instruction = static_cast<ArrayInstructionInterface*>(op);
+      } else {
+        continue;
+      }
+      DehoistArrayIndex(array_instruction);
+    }
+  }
+}
+
+
 HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
   ASSERT(current_block() != NULL);
   current_block()->AddInstruction(instr);
@@ -3881,7 +3966,7 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
         new(zone()) HLoadKeyedFastElement(
             environment()->ExpressionStackAt(2),  // Enum cache.
             environment()->ExpressionStackAt(0),  // Iteration index.
-            HLoadKeyedFastElement::OMIT_HOLE_CHECK));
+            OMIT_HOLE_CHECK));
 
     // Check if the expected map still matches that of the enumerable.
     // If not just deoptimize.
@@ -4172,7 +4257,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
       elements->map() != boilerplate->GetHeap()->fixed_cow_array_map()) {
     if (boilerplate->HasFastDoubleElements()) {
       *total_size += FixedDoubleArray::SizeFor(elements->length());
-    } else if (boilerplate->HasFastElements()) {
+    } else if (boilerplate->HasFastObjectElements()) {
       Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
       int length = elements->length();
       for (int i = 0; i < length; i++) {
@@ -4379,11 +4464,13 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
         Representation::Integer32()));
     switch (boilerplate_elements_kind) {
-      case FAST_SMI_ONLY_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
         // Smi-only arrays need a smi check.
         AddInstruction(new(zone()) HCheckSmi(value));
         // Fall through.
case FAST_ELEMENTS: + case FAST_HOLEY_ELEMENTS: AddInstruction(new(zone()) HStoreKeyedFastElement( elements, key, @@ -4391,6 +4478,7 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { boilerplate_elements_kind)); break; case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: AddInstruction(new(zone()) HStoreKeyedFastDoubleElement(elements, key, value)); @@ -5148,9 +5236,12 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: break; - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -5175,13 +5266,16 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements, ASSERT(val != NULL); switch (elements_kind) { case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: return new(zone()) HStoreKeyedFastDoubleElement( elements, checked_key, val); - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: // Smi-only arrays need a smi check. AddInstruction(new(zone()) HCheckSmi(val)); // Fall through. case FAST_ELEMENTS: + case FAST_HOLEY_ELEMENTS: return new(zone()) HStoreKeyedFastElement( elements, checked_key, val, elements_kind); default: @@ -5190,10 +5284,13 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements, } } // It's an element load (!is_store). - if (elements_kind == FAST_DOUBLE_ELEMENTS) { - return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key); - } else { // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS. - return new(zone()) HLoadKeyedFastElement(elements, checked_key); + HoleCheckMode mode = IsFastPackedElementsKind(elements_kind) ? + OMIT_HOLE_CHECK : + PERFORM_HOLE_CHECK; + if (IsFastDoubleElementsKind(elements_kind)) { + return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key, mode); + } else { // Smi or Object elements. + return new(zone()) HLoadKeyedFastElement(elements, checked_key, mode); } } @@ -5201,15 +5298,30 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements, HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object, HValue* key, HValue* val, + HValue* dependency, Handle map, bool is_store) { - HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMaps(object, map)); - bool fast_smi_only_elements = map->has_fast_smi_only_elements(); - bool fast_elements = map->has_fast_elements(); + HInstruction* mapcheck = + AddInstruction(new(zone()) HCheckMaps(object, map, dependency)); + // No GVNFlag is necessary for ElementsKind if there is an explicit dependency + // on a HElementsTransition instruction. The flag can also be removed if the + // map to check has FAST_HOLEY_ELEMENTS, since there can be no further + // ElementsKind transitions. Finally, the dependency can be removed for stores + // for FAST_ELEMENTS, since a transition to HOLEY elements won't change the + // generated store code. 
+ if (dependency || + (map->elements_kind() == FAST_HOLEY_ELEMENTS) || + (map->elements_kind() == FAST_ELEMENTS && is_store)) { + mapcheck->ClearGVNFlag(kDependsOnElementsKind); + } + bool fast_smi_only_elements = map->has_fast_smi_elements(); + bool fast_elements = map->has_fast_object_elements(); HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object)); if (is_store && (fast_elements || fast_smi_only_elements)) { - AddInstruction(new(zone()) HCheckMaps( - elements, isolate()->factory()->fixed_array_map())); + HCheckMaps* check_cow_map = new(zone()) HCheckMaps( + elements, isolate()->factory()->fixed_array_map()); + check_cow_map->ClearGVNFlag(kDependsOnElementsKind); + AddInstruction(check_cow_map); } HInstruction* length = NULL; HInstruction* checked_key = NULL; @@ -5262,8 +5374,8 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object, for (int i = 0; i < maps->length(); ++i) { Handle map = maps->at(i); ElementsKind elements_kind = map->elements_kind(); - if (elements_kind == FAST_DOUBLE_ELEMENTS || - elements_kind == FAST_ELEMENTS) { + if (IsFastElementsKind(elements_kind) && + elements_kind != GetInitialFastElementsKind()) { possible_transitioned_maps.Add(map); } } @@ -5277,12 +5389,17 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object, int num_untransitionable_maps = 0; Handle untransitionable_map; + HTransitionElementsKind* transition = NULL; for (int i = 0; i < maps->length(); ++i) { Handle map = maps->at(i); ASSERT(map->IsMap()); if (!transition_target.at(i).is_null()) { - AddInstruction(new(zone()) HTransitionElementsKind( - object, map, transition_target.at(i))); + ASSERT(Map::IsValidElementsTransition( + map->elements_kind(), + transition_target.at(i)->elements_kind())); + transition = new(zone()) HTransitionElementsKind( + object, map, transition_target.at(i)); + AddInstruction(transition); } else { type_todo[map->elements_kind()] = true; if (map->elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) { @@ -5302,7 +5419,7 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object, : BuildLoadKeyedGeneric(object, key)); } else { instr = AddInstruction(BuildMonomorphicElementAccess( - object, key, val, untransitionable_map, is_store)); + object, key, val, transition, untransitionable_map, is_store)); } *has_side_effects |= instr->HasObservableSideEffects(); instr->set_position(position); @@ -5319,20 +5436,18 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object, HLoadExternalArrayPointer* external_elements = NULL; HInstruction* checked_key = NULL; - // Generated code assumes that FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS, - // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS are handled before external - // arrays. - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND); - STATIC_ASSERT(FAST_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND); + // Generated code assumes that FAST_* and DICTIONARY_ELEMENTS ElementsKinds + // are handled before external arrays. 
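The conditions under which the map check in BuildMonomorphicElementAccess may drop its kDependsOnElementsKind flag come straight from the comment above: an explicit dependency on the transition instruction, a terminal FAST_HOLEY_ELEMENTS map, or a packed-object store. As a predicate (reusing the illustrative ElementsKind enum from the earlier sketches):

    // Mirrors the dependency-elimination conditions above.
    bool CanOmitElementsKindDependency(bool has_transition_dependency,
                                       ElementsKind kind, bool is_store) {
      return has_transition_dependency ||
             kind == FAST_HOLEY_ELEMENTS ||        // terminal: no further kinds
             (kind == FAST_ELEMENTS && is_store);  // holey store code identical
    }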
+ STATIC_ASSERT(FAST_SMI_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND); + STATIC_ASSERT(FAST_HOLEY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND); STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND); STATIC_ASSERT(DICTIONARY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND); for (ElementsKind elements_kind = FIRST_ELEMENTS_KIND; elements_kind <= LAST_ELEMENTS_KIND; elements_kind = ElementsKind(elements_kind + 1)) { - // After having handled FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS, - // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS, we need to add some code - // that's executed for all external array cases. + // After having handled FAST_* and DICTIONARY_ELEMENTS, we need to add some + // code that's executed for all external array cases. STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND == LAST_ELEMENTS_KIND); if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND @@ -5354,10 +5469,8 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object, set_current_block(if_true); HInstruction* access; - if (elements_kind == FAST_SMI_ONLY_ELEMENTS || - elements_kind == FAST_ELEMENTS || - elements_kind == FAST_DOUBLE_ELEMENTS) { - if (is_store && elements_kind != FAST_DOUBLE_ELEMENTS) { + if (IsFastElementsKind(elements_kind)) { + if (is_store && !IsFastDoubleElementsKind(elements_kind)) { AddInstruction(new(zone()) HCheckMaps( elements, isolate()->factory()->fixed_array_map(), elements_kind_branch)); @@ -5444,7 +5557,7 @@ HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj, : BuildLoadKeyedGeneric(obj, key); } else { AddInstruction(new(zone()) HCheckNonSmi(obj)); - instr = BuildMonomorphicElementAccess(obj, key, val, map, is_store); + instr = BuildMonomorphicElementAccess(obj, key, val, NULL, map, is_store); } } else if (expr->GetReceiverTypes() != NULL && !expr->GetReceiverTypes()->is_empty()) { @@ -8135,14 +8248,6 @@ void HGraphBuilder::GenerateNumberToString(CallRuntime* call) { } -// Fast swapping of elements. Takes three expressions, the object and two -// indices. This should only be used if the indices are known to be -// non-negative and within bounds of the elements array at the call site. -void HGraphBuilder::GenerateSwapElements(CallRuntime* call) { - return Bailout("inlined runtime function: SwapElements"); -} - - // Fast call for custom callbacks. void HGraphBuilder::GenerateCallFunction(CallRuntime* call) { // 1 ~ The function to call is not itself an argument to the call. 
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h index 909d07b..5c8ddbf 100644 --- a/deps/v8/src/hydrogen.h +++ b/deps/v8/src/hydrogen.h @@ -267,6 +267,7 @@ class HGraph: public ZoneObject { void AssignDominators(); void ReplaceCheckedValues(); void EliminateRedundantBoundsChecks(); + void DehoistSimpleArrayIndexComputations(); void PropagateDeoptimizingMark(); // Returns false if there are phi-uses of the arguments-object @@ -1092,6 +1093,7 @@ class HGraphBuilder: public AstVisitor { HInstruction* BuildMonomorphicElementAccess(HValue* object, HValue* key, HValue* val, + HValue* dependency, Handle map, bool is_store); HValue* HandlePolymorphicElementAccess(HValue* object, diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc index a36763d..be46ff2 100644 --- a/deps/v8/src/ia32/builtins-ia32.cc +++ b/deps/v8/src/ia32/builtins-ia32.cc @@ -900,7 +900,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm, const int initial_capacity = JSArray::kPreallocatedArrayElements; STATIC_ASSERT(initial_capacity >= 0); - __ LoadInitialArrayMap(array_function, scratch2, scratch1); + __ LoadInitialArrayMap(array_function, scratch2, scratch1, false); // Allocate the JSArray object together with space for a fixed array with the // requested elements. @@ -1003,7 +1003,8 @@ static void AllocateJSArray(MacroAssembler* masm, ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count ASSERT(!fill_with_hole || !result.is(eax)); // result is never eax - __ LoadInitialArrayMap(array_function, scratch, elements_array); + __ LoadInitialArrayMap(array_function, scratch, + elements_array, fill_with_hole); // Allocate the JSArray object together with space for a FixedArray with the // requested elements. @@ -1274,11 +1275,11 @@ static void ArrayNativeCode(MacroAssembler* masm, __ jmp(&prepare_generic_code_call); __ bind(¬_double); - // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS. + // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS. __ mov(ebx, Operand(esp, 0)); __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset)); __ LoadTransitionedArrayMapConditional( - FAST_SMI_ONLY_ELEMENTS, + FAST_SMI_ELEMENTS, FAST_ELEMENTS, edi, eax, diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc index a1c6edd..df04b28 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.cc +++ b/deps/v8/src/ia32/code-stubs-ia32.cc @@ -3822,20 +3822,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ IncrementCounter(counters->regexp_entry_native(), 1); // Isolates: note we add an additional parameter here (isolate pointer). - static const int kRegExpExecuteArguments = 8; + static const int kRegExpExecuteArguments = 9; __ EnterApiExitFrame(kRegExpExecuteArguments); - // Argument 8: Pass current isolate address. - __ mov(Operand(esp, 7 * kPointerSize), + // Argument 9: Pass current isolate address. + __ mov(Operand(esp, 8 * kPointerSize), Immediate(ExternalReference::isolate_address())); - // Argument 7: Indicate that this is a direct call from JavaScript. - __ mov(Operand(esp, 6 * kPointerSize), Immediate(1)); + // Argument 8: Indicate that this is a direct call from JavaScript. + __ mov(Operand(esp, 7 * kPointerSize), Immediate(1)); - // Argument 6: Start (high end) of backtracking stack memory area. + // Argument 7: Start (high end) of backtracking stack memory area. 
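An aside on the RegExpExecStub hunk in progress: with kRegExpExecuteArguments bumped from 8 to 9, the stub now fills nine stack slots before the call. A hedged reconstruction of the C-level entry signature behind the stores here and in the continuation just below, assembled from this hunk plus the regexp_matcher typedef updated later in this patch; argument 6 is the one this patch inserts:

// Sketch of the generated code's calling convention (names illustrative).
int RegExpEntry(String* input_string,      // 1: handle location of the string
                int start_index,           // 2: character index to start at
                const byte* input_start,   // 3: address of first character
                const byte* input_end,     // 4: address past the last character
                int* capture_array,        // 5: static offsets vector buffer
                int num_output_registers,  // 6: 0 forces non-global behavior
                Address stack_area_base,   // 7: high end of backtrack stack
                int direct_call,           // 8: 1 when called from JS code
                Isolate* isolate);         // 9: current isolate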
__ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address)); __ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size)); - __ mov(Operand(esp, 5 * kPointerSize), esi); + __ mov(Operand(esp, 6 * kPointerSize), esi); + + // Argument 6: Set the number of capture registers to zero to force global + // regexps to behave as non-global. This does not affect non-global regexps. + __ mov(Operand(esp, 5 * kPointerSize), Immediate(0)); // Argument 5: static offsets vector buffer. __ mov(Operand(esp, 4 * kPointerSize), @@ -3898,7 +3902,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Check the result. Label success; - __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS); + __ cmp(eax, 1); + // We expect exactly one result since we force the called regexp to behave + // as non-global. __ j(equal, &success); Label failure; __ cmp(eax, NativeRegExpMacroAssembler::FAILURE); @@ -7057,8 +7063,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { // KeyedStoreStubCompiler::GenerateStoreFastElement. { REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET}, { REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET}, - // ElementsTransitionGenerator::GenerateSmiOnlyToObject - // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble + // ElementsTransitionGenerator::GenerateMapChangeElementTransition + // and ElementsTransitionGenerator::GenerateSmiToDouble // and ElementsTransitionGenerator::GenerateDoubleToObject { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET}, { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET}, @@ -7330,9 +7336,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { __ CheckFastElements(edi, &double_elements); - // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS + // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements __ JumpIfSmi(eax, &smi_element); - __ CheckFastSmiOnlyElements(edi, &fast_elements, Label::kNear); + __ CheckFastSmiElements(edi, &fast_elements, Label::kNear); // Store into the array literal requires a elements transition. Call into // the runtime. @@ -7354,7 +7360,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { __ pop(edx); __ jmp(&slow_elements); - // Array literal has ElementsKind of FAST_ELEMENTS and value is an object. + // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. __ bind(&fast_elements); __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset)); __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size, @@ -7367,15 +7373,15 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { OMIT_SMI_CHECK); __ ret(0); - // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or - // FAST_ELEMENTS, and value is Smi. + // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, + // and value is Smi. __ bind(&smi_element); __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset)); __ mov(FieldOperand(ebx, ecx, times_half_pointer_size, FixedArrayBase::kHeaderSize), eax); __ ret(0); - // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS. + // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS. 
__ bind(&double_elements); __ push(edx); diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index cff6454..eb68687 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -351,7 +351,7 @@ OS::MemCopyFunction CreateMemCopyFunction() { #define __ ACCESS_MASM(masm) -void ElementsTransitionGenerator::GenerateSmiOnlyToObject( +void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( MacroAssembler* masm) { // ----------- S t a t e ------------- // -- eax : value @@ -372,7 +372,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToObject( } -void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( +void ElementsTransitionGenerator::GenerateSmiToDouble( MacroAssembler* masm, Label* fail) { // ----------- S t a t e ------------- // -- eax : value diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc index 901e38b..d153e18 100644 --- a/deps/v8/src/ia32/debug-ia32.cc +++ b/deps/v8/src/ia32/debug-ia32.cc @@ -175,7 +175,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, // Read current padding counter and skip corresponding number of words. __ pop(unused_reg); // We divide stored value by 2 (untagging) and multiply it by word's size. - STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0); __ lea(esp, Operand(esp, unused_reg, times_half_pointer_size, 0)); // Get rid of the internal frame. diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc index 10fe77b..9727ea0 100644 --- a/deps/v8/src/ia32/full-codegen-ia32.cc +++ b/deps/v8/src/ia32/full-codegen-ia32.cc @@ -1649,7 +1649,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { ASSERT_EQ(2, constant_elements->length()); ElementsKind constant_elements_kind = static_cast(Smi::cast(constant_elements->get(0))->value()); - bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS; + bool has_constant_fast_elements = + IsFastObjectElementsKind(constant_elements_kind); Handle constant_elements_values( FixedArrayBase::cast(constant_elements->get(1))); @@ -1660,7 +1661,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { Heap* heap = isolate()->heap(); if (has_constant_fast_elements && constant_elements_values->map() == heap->fixed_cow_array_map()) { - // If the elements are already FAST_ELEMENTS, the boilerplate cannot + // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot // change, so it's possible to specialize the stub in advance. __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1); FastCloneShallowArrayStub stub( @@ -1672,10 +1673,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3); } else { - ASSERT(constant_elements_kind == FAST_ELEMENTS || - constant_elements_kind == FAST_SMI_ONLY_ELEMENTS || + ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) || FLAG_smi_only_arrays); - // If the elements are already FAST_ELEMENTS, the boilerplate cannot + // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot // change, so it's possible to specialize the stub in advance. FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements ? 
FastCloneShallowArrayStub::CLONE_ELEMENTS @@ -1703,9 +1703,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { } VisitForAccumulatorValue(subexpr); - if (constant_elements_kind == FAST_ELEMENTS) { - // Fast-case array literal with ElementsKind of FAST_ELEMENTS, they cannot - // transition and don't need to call the runtime stub. + if (IsFastObjectElementsKind(constant_elements_kind)) { + // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they + // cannot transition and don't need to call the runtime stub. int offset = FixedArray::kHeaderSize + (i * kPointerSize); __ mov(ebx, Operand(esp, 0)); // Copy of array literal. __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset)); @@ -3405,99 +3405,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) { } -void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) { - ZoneList* args = expr->arguments(); - ASSERT(args->length() == 3); - VisitForStackValue(args->at(0)); - VisitForStackValue(args->at(1)); - VisitForStackValue(args->at(2)); - Label done; - Label slow_case; - Register object = eax; - Register index_1 = ebx; - Register index_2 = ecx; - Register elements = edi; - Register temp = edx; - __ mov(object, Operand(esp, 2 * kPointerSize)); - // Fetch the map and check if array is in fast case. - // Check that object doesn't require security checks and - // has no indexed interceptor. - __ CmpObjectType(object, JS_ARRAY_TYPE, temp); - __ j(not_equal, &slow_case); - __ test_b(FieldOperand(temp, Map::kBitFieldOffset), - KeyedLoadIC::kSlowCaseBitFieldMask); - __ j(not_zero, &slow_case); - - // Check the object's elements are in fast case and writable. - __ mov(elements, FieldOperand(object, JSObject::kElementsOffset)); - __ cmp(FieldOperand(elements, HeapObject::kMapOffset), - Immediate(isolate()->factory()->fixed_array_map())); - __ j(not_equal, &slow_case); - - // Check that both indices are smis. - __ mov(index_1, Operand(esp, 1 * kPointerSize)); - __ mov(index_2, Operand(esp, 0)); - __ mov(temp, index_1); - __ or_(temp, index_2); - __ JumpIfNotSmi(temp, &slow_case); - - // Check that both indices are valid. - __ mov(temp, FieldOperand(object, JSArray::kLengthOffset)); - __ cmp(temp, index_1); - __ j(below_equal, &slow_case); - __ cmp(temp, index_2); - __ j(below_equal, &slow_case); - - // Bring addresses into index1 and index2. - __ lea(index_1, CodeGenerator::FixedArrayElementOperand(elements, index_1)); - __ lea(index_2, CodeGenerator::FixedArrayElementOperand(elements, index_2)); - - // Swap elements. Use object and temp as scratch registers. - __ mov(object, Operand(index_1, 0)); - __ mov(temp, Operand(index_2, 0)); - __ mov(Operand(index_2, 0), object); - __ mov(Operand(index_1, 0), temp); - - Label no_remembered_set; - __ CheckPageFlag(elements, - temp, - 1 << MemoryChunk::SCAN_ON_SCAVENGE, - not_zero, - &no_remembered_set, - Label::kNear); - // Possible optimization: do a check that both values are Smis - // (or them and test against Smi mask.) - - // We are swapping two objects in an array and the incremental marker never - // pauses in the middle of scanning a single object. Therefore the - // incremental marker is not disturbed, so we don't need to call the - // RecordWrite stub that notifies the incremental marker. 
- __ RememberedSetHelper(elements, - index_1, - temp, - kDontSaveFPRegs, - MacroAssembler::kFallThroughAtEnd); - __ RememberedSetHelper(elements, - index_2, - temp, - kDontSaveFPRegs, - MacroAssembler::kFallThroughAtEnd); - - __ bind(&no_remembered_set); - - // We are done. Drop elements from the stack, and return undefined. - __ add(esp, Immediate(3 * kPointerSize)); - __ mov(eax, isolate()->factory()->undefined_value()); - __ jmp(&done); - - __ bind(&slow_case); - __ CallRuntime(Runtime::kSwapElements, 3); - - __ bind(&done); - context()->Plug(eax); -} - - void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); ASSERT_EQ(2, args->length()); diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc index dc64a09..a091ff1 100644 --- a/deps/v8/src/ia32/ic-ia32.cc +++ b/deps/v8/src/ia32/ic-ia32.cc @@ -889,25 +889,25 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, &non_double_value, DONT_DO_SMI_CHECK); - // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS -> - // FAST_DOUBLE_ELEMENTS and complete the store. - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS + // and complete the store. + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, ebx, edi, &slow); - ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow); __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); __ jmp(&fast_double_without_map_check); __ bind(&non_double_value); - // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, ebx, edi, &slow); - ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm); + ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm); __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); __ jmp(&finish_object_store); @@ -1622,7 +1622,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) { // Must return the modified receiver in eax. if (!FLAG_trace_elements_transitions) { Label fail; - ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail); __ mov(eax, edx); __ Ret(); __ bind(&fail); diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc index 455c502..18f2a39 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.cc +++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc @@ -2377,8 +2377,10 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) { __ movzx_b(temp, FieldOperand(temp, Map::kBitField2Offset)); __ and_(temp, Map::kElementsKindMask); __ shr(temp, Map::kElementsKindShift); - __ cmp(temp, FAST_ELEMENTS); - __ j(equal, &ok, Label::kNear); + __ cmp(temp, GetInitialFastElementsKind()); + __ j(less, &fail, Label::kNear); + __ cmp(temp, TERMINAL_FAST_ELEMENTS_KIND); + __ j(less_equal, &ok, Label::kNear); __ cmp(temp, FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND); __ j(less, &fail, Label::kNear); __ cmp(temp, LAST_EXTERNAL_ARRAY_ELEMENTS_KIND); @@ -2421,9 +2423,11 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { // Load the result. 
__ mov(result, - BuildFastArrayOperand(instr->elements(), instr->key(), + BuildFastArrayOperand(instr->elements(), + instr->key(), FAST_ELEMENTS, - FixedArray::kHeaderSize - kHeapObjectTag)); + FixedArray::kHeaderSize - kHeapObjectTag, + instr->additional_index())); // Check for the hole value. if (instr->hydrogen()->RequiresHoleCheck()) { @@ -2437,18 +2441,24 @@ void LCodeGen::DoLoadKeyedFastDoubleElement( LLoadKeyedFastDoubleElement* instr) { XMMRegister result = ToDoubleRegister(instr->result()); - int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + - sizeof(kHoleNanLower32); - Operand hole_check_operand = BuildFastArrayOperand( - instr->elements(), instr->key(), - FAST_DOUBLE_ELEMENTS, - offset); - __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); - DeoptimizeIf(equal, instr->environment()); + if (instr->hydrogen()->RequiresHoleCheck()) { + int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + + sizeof(kHoleNanLower32); + Operand hole_check_operand = BuildFastArrayOperand( + instr->elements(), instr->key(), + FAST_DOUBLE_ELEMENTS, + offset, + instr->additional_index()); + __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); + DeoptimizeIf(equal, instr->environment()); + } Operand double_load_operand = BuildFastArrayOperand( - instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS, - FixedDoubleArray::kHeaderSize - kHeapObjectTag); + instr->elements(), + instr->key(), + FAST_DOUBLE_ELEMENTS, + FixedDoubleArray::kHeaderSize - kHeapObjectTag, + instr->additional_index()); __ movdbl(result, double_load_operand); } @@ -2457,7 +2467,8 @@ Operand LCodeGen::BuildFastArrayOperand( LOperand* elements_pointer, LOperand* key, ElementsKind elements_kind, - uint32_t offset) { + uint32_t offset, + uint32_t additional_index) { Register elements_pointer_reg = ToRegister(elements_pointer); int shift_size = ElementsKindToShiftSize(elements_kind); if (key->IsConstantOperand()) { @@ -2466,10 +2477,14 @@ Operand LCodeGen::BuildFastArrayOperand( Abort("array index constant value too big"); } return Operand(elements_pointer_reg, - constant_value * (1 << shift_size) + offset); + ((constant_value + additional_index) << shift_size) + + offset); } else { ScaleFactor scale_factor = static_cast(shift_size); - return Operand(elements_pointer_reg, ToRegister(key), scale_factor, offset); + return Operand(elements_pointer_reg, + ToRegister(key), + scale_factor, + offset + (additional_index << shift_size)); } } @@ -2478,7 +2493,10 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( LLoadKeyedSpecializedArrayElement* instr) { ElementsKind elements_kind = instr->elements_kind(); Operand operand(BuildFastArrayOperand(instr->external_pointer(), - instr->key(), elements_kind, 0)); + instr->key(), + elements_kind, + 0, + instr->additional_index())); if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { XMMRegister result(ToDoubleRegister(instr->result())); __ movss(result, operand); @@ -2514,9 +2532,12 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( break; case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3415,7 +3436,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( LStoreKeyedSpecializedArrayElement* instr) { ElementsKind elements_kind = instr->elements_kind(); Operand 
operand(BuildFastArrayOperand(instr->external_pointer(), - instr->key(), elements_kind, 0)); + instr->key(), + elements_kind, + 0, + instr->additional_index())); if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value())); __ movss(operand, xmm0); @@ -3439,9 +3463,12 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( break; case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3456,31 +3483,21 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { Register elements = ToRegister(instr->object()); Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; - // Do the store. - if (instr->key()->IsConstantOperand()) { - ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - int offset = - ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize; - __ mov(FieldOperand(elements, offset), value); - } else { - __ mov(FieldOperand(elements, - key, - times_pointer_size, - FixedArray::kHeaderSize), - value); - } + Operand operand = BuildFastArrayOperand( + instr->object(), + instr->key(), + FAST_ELEMENTS, + FixedArray::kHeaderSize - kHeapObjectTag, + instr->additional_index()); + __ mov(operand, value); if (instr->hydrogen()->NeedsWriteBarrier()) { + ASSERT(!instr->key()->IsConstantOperand()); HType type = instr->hydrogen()->value()->type(); SmiCheck check_needed = type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; // Compute address of modified element and store it into key register. - __ lea(key, - FieldOperand(elements, - key, - times_pointer_size, - FixedArray::kHeaderSize)); + __ lea(key, operand); __ RecordWrite(elements, key, value, @@ -3508,8 +3525,11 @@ void LCodeGen::DoStoreKeyedFastDoubleElement( } Operand double_store_operand = BuildFastArrayOperand( - instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS, - FixedDoubleArray::kHeaderSize - kHeapObjectTag); + instr->elements(), + instr->key(), + FAST_DOUBLE_ELEMENTS, + FixedDoubleArray::kHeaderSize - kHeapObjectTag, + instr->additional_index()); __ movdbl(double_store_operand, value); } @@ -3540,22 +3560,23 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map); __ j(not_equal, ¬_applicable); __ mov(new_map_reg, to_map); - if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) { + if (IsSimpleMapChangeTransition(from_kind, to_kind)) { Register object_reg = ToRegister(instr->object()); __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg); // Write barrier. 
ASSERT_NE(instr->temp_reg(), NULL); __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, ToRegister(instr->temp_reg()), kDontSaveFPRegs); - } else if (from_kind == FAST_SMI_ONLY_ELEMENTS && - to_kind == FAST_DOUBLE_ELEMENTS) { + } else if (IsFastSmiElementsKind(from_kind) && + IsFastDoubleElementsKind(to_kind)) { Register fixed_object_reg = ToRegister(instr->temp_reg()); ASSERT(fixed_object_reg.is(edx)); ASSERT(new_map_reg.is(ebx)); __ mov(fixed_object_reg, object_reg); CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(), RelocInfo::CODE_TARGET, instr); - } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) { + } else if (IsFastDoubleElementsKind(from_kind) && + IsFastObjectElementsKind(to_kind)) { Register fixed_object_reg = ToRegister(instr->temp_reg()); ASSERT(fixed_object_reg.is(edx)); ASSERT(new_map_reg.is(ebx)); @@ -4415,8 +4436,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { // Deopt if the array literal boilerplate ElementsKind is of a type different // than the expected one. The check isn't necessary if the boilerplate has - // already been converted to FAST_ELEMENTS. - if (boilerplate_elements_kind != FAST_ELEMENTS) { + // already been converted to TERMINAL_FAST_ELEMENTS_KIND. + if (CanTransitionToMoreGeneralFastElementsKind( + boilerplate_elements_kind, true)) { __ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object()); __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); // Load the map's "bit field 2". We only need the first byte, @@ -4578,8 +4600,9 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) { // Deopt if the literal boilerplate ElementsKind is of a type different than // the expected one. The check isn't necessary if the boilerplate has already - // been converted to FAST_ELEMENTS. - if (boilerplate_elements_kind != FAST_ELEMENTS) { + // already been converted to TERMINAL_FAST_ELEMENTS_KIND. + if (CanTransitionToMoreGeneralFastElementsKind( + boilerplate_elements_kind, true)) { __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate()); __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset)); // Load the map's "bit field 2". We only need the first byte, diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h index a2810f0..392bca2 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.h +++ b/deps/v8/src/ia32/lithium-codegen-ia32.h @@ -242,7 +242,8 @@ class LCodeGen BASE_EMBEDDED { Operand BuildFastArrayOperand(LOperand* elements_pointer, LOperand* key, ElementsKind elements_kind, - uint32_t offset); + uint32_t offset, + uint32_t additional_index = 0); // Specific math operations - used from DoUnaryMathOperation. void EmitIntegerMathAbs(LUnaryMathOperation* instr); diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc index 5adaf43..fbc75d0 100644 --- a/deps/v8/src/ia32/lithium-ia32.cc +++ b/deps/v8/src/ia32/lithium-ia32.cc @@ -1990,8 +1990,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement( LOperand* external_pointer = UseRegister(instr->external_pointer()); LOperand* key = UseRegisterOrConstant(instr->key()); LLoadKeyedSpecializedArrayElement* result = - new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, - key); + new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key); LInstruction* load_instr = DefineAsRegister(result); // An unsigned int array load might overflow and cause a deopt, make sure it // has an environment. 
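The BuildFastArrayOperand change a few hunks above is the heart of the additional_index plumbing: a statically known index offset is folded into the x86 addressing mode instead of costing a separate add. A standalone model of the displacement arithmetic, with names taken from the hunk:

#include <cstdint>

// With a constant key the whole index folds into the displacement; with a
// register key only additional_index lands there, and the key register
// contributes key << shift_size at runtime via the scale factor.
uint32_t ConstantKeyDisplacement(uint32_t constant_key,
                                 uint32_t additional_index,
                                 int shift_size,  // log2 of the element size
                                 uint32_t header_offset) {
  return ((constant_key + additional_index) << shift_size) + header_offset;
}

uint32_t RegisterKeyDisplacement(uint32_t additional_index,
                                 int shift_size,
                                 uint32_t header_offset) {
  return header_offset + (additional_index << shift_size);
}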
@@ -2093,8 +2092,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LInstruction* LChunkBuilder::DoTransitionElementsKind( HTransitionElementsKind* instr) { - if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS && - instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) { + ElementsKind from_kind = instr->original_map()->elements_kind(); + ElementsKind to_kind = instr->transitioned_map()->elements_kind(); + if (IsSimpleMapChangeTransition(from_kind, to_kind)) { LOperand* object = UseRegister(instr->object()); LOperand* new_map_reg = TempRegister(); LOperand* temp_reg = TempRegister(); diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h index 09f0b0d..be64b2f 100644 --- a/deps/v8/src/ia32/lithium-ia32.h +++ b/deps/v8/src/ia32/lithium-ia32.h @@ -1238,13 +1238,13 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> { public: - LLoadKeyedFastDoubleElement(LOperand* elements, - LOperand* key) { + LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) { inputs_[0] = elements; inputs_[1] = key; } @@ -1255,13 +1255,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { public: - LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, - LOperand* key) { + LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) { inputs_[0] = external_pointer; inputs_[1] = key; } @@ -1275,6 +1275,7 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1775,6 +1776,7 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { LOperand* object() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1797,6 +1799,7 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } }; @@ -1822,6 +1825,7 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> { ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc index c31b0c2..1b61486 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/macro-assembler-ia32.cc @@ -382,10 +382,12 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) { void MacroAssembler::CheckFastElements(Register map, Label* fail, Label::Distance distance) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - 
STATIC_ASSERT(FAST_ELEMENTS == 1); + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + STATIC_ASSERT(FAST_ELEMENTS == 2); + STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); cmpb(FieldOperand(map, Map::kBitField2Offset), - Map::kMaximumBitField2FastElementValue); + Map::kMaximumBitField2FastHoleyElementValue); j(above, fail, distance); } @@ -393,23 +395,26 @@ void MacroAssembler::CheckFastElements(Register map, void MacroAssembler::CheckFastObjectElements(Register map, Label* fail, Label::Distance distance) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - STATIC_ASSERT(FAST_ELEMENTS == 1); + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + STATIC_ASSERT(FAST_ELEMENTS == 2); + STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); cmpb(FieldOperand(map, Map::kBitField2Offset), - Map::kMaximumBitField2FastSmiOnlyElementValue); + Map::kMaximumBitField2FastHoleySmiElementValue); j(below_equal, fail, distance); cmpb(FieldOperand(map, Map::kBitField2Offset), - Map::kMaximumBitField2FastElementValue); + Map::kMaximumBitField2FastHoleyElementValue); j(above, fail, distance); } -void MacroAssembler::CheckFastSmiOnlyElements(Register map, - Label* fail, - Label::Distance distance) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); +void MacroAssembler::CheckFastSmiElements(Register map, + Label* fail, + Label::Distance distance) { + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); cmpb(FieldOperand(map, Map::kBitField2Offset), - Map::kMaximumBitField2FastSmiOnlyElementValue); + Map::kMaximumBitField2FastHoleySmiElementValue); j(above, fail, distance); } @@ -493,24 +498,18 @@ void MacroAssembler::CompareMap(Register obj, CompareMapMode mode) { cmp(FieldOperand(obj, HeapObject::kMapOffset), map); if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) { - Map* transitioned_fast_element_map( - map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL)); - ASSERT(transitioned_fast_element_map == NULL || - map->elements_kind() != FAST_ELEMENTS); - if (transitioned_fast_element_map != NULL) { - j(equal, early_success, Label::kNear); - cmp(FieldOperand(obj, HeapObject::kMapOffset), - Handle(transitioned_fast_element_map)); - } - - Map* transitioned_double_map( - map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL)); - ASSERT(transitioned_double_map == NULL || - map->elements_kind() == FAST_SMI_ONLY_ELEMENTS); - if (transitioned_double_map != NULL) { - j(equal, early_success, Label::kNear); - cmp(FieldOperand(obj, HeapObject::kMapOffset), - Handle(transitioned_double_map)); + ElementsKind kind = map->elements_kind(); + if (IsFastElementsKind(kind)) { + bool packed = IsFastPackedElementsKind(kind); + Map* current_map = *map; + while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) { + kind = GetNextMoreGeneralFastElementsKind(kind, packed); + current_map = current_map->LookupElementsTransitionMap(kind, NULL); + if (!current_map) break; + j(equal, early_success, Label::kNear); + cmp(FieldOperand(obj, HeapObject::kMapOffset), + Handle(current_map)); + } } } } @@ -2161,27 +2160,38 @@ void MacroAssembler::LoadTransitionedArrayMapConditional( mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset)); // Check that the function's map is the same as the expected cached map. 
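The LoadTransitionedArrayMapConditional hunk continuing below swaps the old one-context-slot-per-ElementsKind scheme for a single FixedArray of initial JSArray maps indexed by kind. A hedged C++ model of the lookup the new assembly performs, assuming the usual Context/FixedArray accessors:

// Model of the new map-cache lookup: the global context holds one
// FixedArray (Context::JS_ARRAY_MAPS_INDEX) whose slot for a kind sits at
// kind * kPointerSize + FixedArrayBase::kHeaderSize, as computed below.
Object* InitialArrayMapFor(Context* global_context, ElementsKind kind) {
  FixedArray* js_array_maps =
      FixedArray::cast(global_context->get(Context::JS_ARRAY_MAPS_INDEX));
  return js_array_maps->get(kind);
}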
- int expected_index = - Context::GetContextMapIndexFromElementsKind(expected_kind); - cmp(map_in_out, Operand(scratch, Context::SlotOffset(expected_index))); + mov(scratch, Operand(scratch, + Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); + + size_t offset = expected_kind * kPointerSize + + FixedArrayBase::kHeaderSize; + cmp(map_in_out, FieldOperand(scratch, offset)); j(not_equal, no_map_match); // Use the transitioned cached map. - int trans_index = - Context::GetContextMapIndexFromElementsKind(transitioned_kind); - mov(map_in_out, Operand(scratch, Context::SlotOffset(trans_index))); + offset = transitioned_kind * kPointerSize + + FixedArrayBase::kHeaderSize; + mov(map_in_out, FieldOperand(scratch, offset)); } void MacroAssembler::LoadInitialArrayMap( - Register function_in, Register scratch, Register map_out) { + Register function_in, Register scratch, + Register map_out, bool can_have_holes) { ASSERT(!function_in.is(map_out)); Label done; mov(map_out, FieldOperand(function_in, JSFunction::kPrototypeOrInitialMapOffset)); if (!FLAG_smi_only_arrays) { - LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, - FAST_ELEMENTS, + ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS; + LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, + kind, + map_out, + scratch, + &done); + } else if (can_have_holes) { + LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, + FAST_HOLEY_SMI_ELEMENTS, map_out, scratch, &done); diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h index 66d1ce7..c71cad8 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.h +++ b/deps/v8/src/ia32/macro-assembler-ia32.h @@ -235,7 +235,8 @@ class MacroAssembler: public Assembler { // Load the initial map for new Arrays from a JSFunction. void LoadInitialArrayMap(Register function_in, Register scratch, - Register map_out); + Register map_out, + bool can_have_holes); // Load the global function with the given index. void LoadGlobalFunction(int index, Register function); @@ -357,9 +358,9 @@ class MacroAssembler: public Assembler { // Check if a map for a JSObject indicates that the object has fast smi only // elements. Jump to the specified label if it does not. - void CheckFastSmiOnlyElements(Register map, - Label* fail, - Label::Distance distance = Label::kFar); + void CheckFastSmiElements(Register map, + Label* fail, + Label::Distance distance = Label::kFar); // Check to see if maybe_number can be stored as a double in // FastDoubleElements. If it can, store it at the index specified by key in diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc index 0029f33..cba1660 100644 --- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -42,28 +42,30 @@ namespace internal { #ifndef V8_INTERPRETED_REGEXP /* * This assembler uses the following register assignment convention - * - edx : current character. Must be loaded using LoadCurrentCharacter - * before using any of the dispatch methods. - * - edi : current position in input, as negative offset from end of string. + * - edx : Current character. 
Must be loaded using LoadCurrentCharacter + * before using any of the dispatch methods. Temporarily stores the + * index of capture start after a matching pass for a global regexp. + * - edi : Current position in input, as negative offset from end of string. * Please notice that this is the byte offset, not the character offset! * - esi : end of input (points to byte after last character in input). - * - ebp : frame pointer. Used to access arguments, local variables and + * - ebp : Frame pointer. Used to access arguments, local variables and * RegExp registers. - * - esp : points to tip of C stack. - * - ecx : points to tip of backtrack stack + * - esp : Points to tip of C stack. + * - ecx : Points to tip of backtrack stack * * The registers eax and ebx are free to use for computations. * * Each call to a public method should retain this convention. * The stack will have the following structure: - * - Isolate* isolate (Address of the current isolate) + * - Isolate* isolate (address of the current isolate) * - direct_call (if 1, direct call from JavaScript code, if 0 * call through the runtime system) - * - stack_area_base (High end of the memory area to use as + * - stack_area_base (high end of the memory area to use as * backtracking stack) + * - capture array size (may fit multiple sets of matches) * - int* capture_array (int[num_saved_registers_], for output). - * - end of input (Address of end of string) - * - start of input (Address of first character in string) + * - end of input (address of end of string) + * - start of input (address of first character in string) * - start index (character index of start) * - String* input_string (location of a handle containing the string) * --- frame alignment (if applicable) --- @@ -72,9 +74,10 @@ namespace internal { * - backup of caller esi * - backup of caller edi * - backup of caller ebx + * - success counter (only for global regexps to count matches). * - Offset of location before start of input (effectively character * position -1). Used to initialize capture registers to a non-position. - * - register 0 ebp[-4] (Only positions must be stored in the first + * - register 0 ebp[-4] (only positions must be stored in the first * - register 1 ebp[-8] num_saved_registers_ registers) * - ... * @@ -706,13 +709,16 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type, void RegExpMacroAssemblerIA32::Fail() { - ASSERT(FAILURE == 0); // Return value for failure is zero. - __ Set(eax, Immediate(0)); + STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero. + if (!global()) { + __ Set(eax, Immediate(FAILURE)); + } __ jmp(&exit_label_); } Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { + Label return_eax; // Finalize code - write the entry point code now we know how many // registers we need. @@ -731,6 +737,7 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { __ push(esi); __ push(edi); __ push(ebx); // Callee-save on MacOS. + __ push(Immediate(0)); // Number of successful matches in a global regexp. __ push(Immediate(0)); // Make room for "input start - 1" constant. // Check if we have space on the stack for registers. @@ -750,13 +757,13 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { // Exit with OutOfMemory exception. There is not enough space on the stack // for our working registers. 
__ mov(eax, EXCEPTION); - __ jmp(&exit_label_); + __ jmp(&return_eax); __ bind(&stack_limit_hit); CallCheckStackGuardState(ebx); __ or_(eax, eax); // If returned value is non-zero, we exit with the returned value as result. - __ j(not_zero, &exit_label_); + __ j(not_zero, &return_eax); __ bind(&stack_ok); // Load start index for later use. @@ -783,19 +790,8 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { // position registers. __ mov(Operand(ebp, kInputStartMinusOne), eax); - if (num_saved_registers_ > 0) { // Always is, if generated from a regexp. - // Fill saved registers with initial value = start offset - 1 - // Fill in stack push order, to avoid accessing across an unwritten - // page (a problem on Windows). - __ mov(ecx, kRegisterZero); - Label init_loop; - __ bind(&init_loop); - __ mov(Operand(ebp, ecx, times_1, +0), eax); - __ sub(ecx, Immediate(kPointerSize)); - __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize); - __ j(greater, &init_loop); - } - // Ensure that we have written to each stack page, in order. Skipping a page +#ifdef WIN32 + // Ensure that we write to each stack page, in order. Skipping a page // on Windows can cause segmentation faults. Assuming page size is 4k. const int kPageSize = 4096; const int kRegistersPerPage = kPageSize / kPointerSize; @@ -804,20 +800,45 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { i += kRegistersPerPage) { __ mov(register_location(i), eax); // One write every page. } +#endif // WIN32 + + Label load_char_start_regexp, start_regexp; + // Load newline if index is at start, previous character otherwise. + __ cmp(Operand(ebp, kStartIndex), Immediate(0)); + __ j(not_equal, &load_char_start_regexp, Label::kNear); + __ mov(current_character(), '\n'); + __ jmp(&start_regexp, Label::kNear); + // Global regexp restarts matching here. + __ bind(&load_char_start_regexp); + // Load previous char as initial value of current character register. + LoadCurrentCharacterUnchecked(-1, 1); + __ bind(&start_regexp); + + // Initialize on-stack registers. + if (num_saved_registers_ > 0) { // Always is, if generated from a regexp. + // Fill saved registers with initial value = start offset - 1 + // Fill in stack push order, to avoid accessing across an unwritten + // page (a problem on Windows). + if (num_saved_registers_ > 8) { + __ mov(ecx, kRegisterZero); + Label init_loop; + __ bind(&init_loop); + __ mov(Operand(ebp, ecx, times_1, 0), eax); + __ sub(ecx, Immediate(kPointerSize)); + __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize); + __ j(greater, &init_loop); + } else { // Unroll the loop. + for (int i = 0; i < num_saved_registers_; i++) { + __ mov(register_location(i), eax); + } + } + } // Initialize backtrack stack pointer. __ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd)); - // Load previous char as initial value of current-character. - Label at_start; - __ cmp(Operand(ebp, kStartIndex), Immediate(0)); - __ j(equal, &at_start); - LoadCurrentCharacterUnchecked(-1, 1); // Load previous char. - __ jmp(&start_label_); - __ bind(&at_start); - __ mov(current_character(), '\n'); - __ jmp(&start_label_); + __ jmp(&start_label_); // Exit code: if (success_label_.is_linked()) { @@ -836,6 +857,10 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { } for (int i = 0; i < num_saved_registers_; i++) { __ mov(eax, register_location(i)); + if (i == 0 && global()) { + // Keep capture start in edx for the zero-length check later. 
+ __ mov(edx, eax); + } // Convert to index from start of string, not end. __ add(eax, ecx); if (mode_ == UC16) { @@ -844,10 +869,54 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { __ mov(Operand(ebx, i * kPointerSize), eax); } } - __ mov(eax, Immediate(SUCCESS)); + + if (global()) { + // Restart matching if the regular expression is flagged as global. + // Increment success counter. + __ inc(Operand(ebp, kSuccessfulCaptures)); + // Capture results have been stored, so the number of remaining global + // output registers is reduced by the number of stored captures. + __ mov(ecx, Operand(ebp, kNumOutputRegisters)); + __ sub(ecx, Immediate(num_saved_registers_)); + // Check whether we have enough room for another set of capture results. + __ cmp(ecx, Immediate(num_saved_registers_)); + __ j(less, &exit_label_); + + __ mov(Operand(ebp, kNumOutputRegisters), ecx); + // Advance the location for output. + __ add(Operand(ebp, kRegisterOutput), + Immediate(num_saved_registers_ * kPointerSize)); + + // Prepare eax to initialize registers with its value in the next run. + __ mov(eax, Operand(ebp, kInputStartMinusOne)); + + // Special case for zero-length matches. + // edx: capture start index + __ cmp(edi, edx); + // Not a zero-length match, restart. + __ j(not_equal, &load_char_start_regexp); + // edi (offset from the end) is zero if we already reached the end. + __ test(edi, edi); + __ j(zero, &exit_label_, Label::kNear); + // Advance current position after a zero-length match. + if (mode_ == UC16) { + __ add(edi, Immediate(2)); + } else { + __ inc(edi); + } + __ jmp(&load_char_start_regexp); + } else { + __ mov(eax, Immediate(SUCCESS)); + } } - // Exit and return eax + __ bind(&exit_label_); + if (global()) { + // Return the number of successful captures. + __ mov(eax, Operand(ebp, kSuccessfulCaptures)); + } + + __ bind(&return_eax); // Skip esp past regexp registers. __ lea(esp, Operand(ebp, kBackup_ebx)); // Restore callee-save registers. @@ -877,7 +946,7 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { __ or_(eax, eax); // If returning non-zero, we should end execution with the given // result as return value. - __ j(not_zero, &exit_label_); + __ j(not_zero, &return_eax); __ pop(edi); __ pop(backtrack_stackpointer()); @@ -924,7 +993,7 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { __ bind(&exit_with_exception); // Exit with Result EXCEPTION(-1) to signal thrown exception. __ mov(eax, EXCEPTION); - __ jmp(&exit_label_); + __ jmp(&return_eax); } CodeDesc code_desc; @@ -1043,8 +1112,9 @@ void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) { } -void RegExpMacroAssemblerIA32::Succeed() { +bool RegExpMacroAssemblerIA32::Succeed() { __ jmp(&success_label_); + return global(); } diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h index 78cd069..f631ffc 100644 --- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h +++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h @@ -1,4 +1,4 @@ -// Copyright 2008-2009 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -111,7 +111,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler { virtual void ReadStackPointerFromRegister(int reg); virtual void SetCurrentPositionFromEnd(int by); virtual void SetRegister(int register_index, int to); - virtual void Succeed(); + virtual bool Succeed(); virtual void WriteCurrentPositionToRegister(int reg, int cp_offset); virtual void ClearRegisters(int reg_from, int reg_to); virtual void WriteStackPointerToRegister(int reg); @@ -135,7 +135,11 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler { static const int kInputStart = kStartIndex + kPointerSize; static const int kInputEnd = kInputStart + kPointerSize; static const int kRegisterOutput = kInputEnd + kPointerSize; - static const int kStackHighEnd = kRegisterOutput + kPointerSize; + // For the case of global regular expression, we have room to store at least + // one set of capture results. For the case of non-global regexp, we ignore + // this value. + static const int kNumOutputRegisters = kRegisterOutput + kPointerSize; + static const int kStackHighEnd = kNumOutputRegisters + kPointerSize; static const int kDirectCall = kStackHighEnd + kPointerSize; static const int kIsolate = kDirectCall + kPointerSize; // Below the frame pointer - local stack variables. @@ -144,7 +148,8 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler { static const int kBackup_esi = kFramePointer - kPointerSize; static const int kBackup_edi = kBackup_esi - kPointerSize; static const int kBackup_ebx = kBackup_edi - kPointerSize; - static const int kInputStartMinusOne = kBackup_ebx - kPointerSize; + static const int kSuccessfulCaptures = kBackup_ebx - kPointerSize; + static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize; // First register address. Following registers are below it on the stack. static const int kRegisterZero = kInputStartMinusOne - kPointerSize; diff --git a/deps/v8/src/ia32/simulator-ia32.h b/deps/v8/src/ia32/simulator-ia32.h index 13ddf35..478d4ce 100644 --- a/deps/v8/src/ia32/simulator-ia32.h +++ b/deps/v8/src/ia32/simulator-ia32.h @@ -1,4 +1,4 @@ -// Copyright 2008 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -40,12 +40,12 @@ namespace internal { typedef int (*regexp_matcher)(String*, int, const byte*, - const byte*, int*, Address, int, Isolate*); + const byte*, int*, int, Address, int, Isolate*); // Call the generated regexp code directly. The code at the entry address should // expect eight int/pointer sized arguments and return an int. 
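The kNumOutputRegisters and kSuccessfulCaptures frame slots defined in the header hunk above drive the restart loop shown earlier in GetCode. Two illustrative helpers, models rather than V8 code, for the budget check and the zero-length-match rule:

// After a global regexp stores one set of captures, the stub shrinks the
// output budget and restarts matching only if another full capture set
// still fits (mirrors the sub/cmp on kNumOutputRegisters in GetCode).
bool RoomForAnotherMatch(int* num_output_registers, int num_saved_registers) {
  *num_output_registers -= num_saved_registers;
  return *num_output_registers >= num_saved_registers;
}

// A zero-length match must still make progress: advance one character,
// which is two bytes in UC16 mode (the add edi, 2 / inc edi above).
int AdvanceAfterZeroLengthMatch(int position, bool uc16) {
  return position + (uc16 ? 2 : 1);
}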
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \ - (FUNCTION_CAST(entry)(p0, p1, p2, p3, p4, p5, p6, p7)) +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \ + (FUNCTION_CAST(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8)) #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index e148e2f..71740ac 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -1462,16 +1462,31 @@ Handle CallStubCompiler::CompileArrayPushCall( __ jmp(&fast_object); // In case of fast smi-only, convert to fast object, otherwise bail out. __ bind(¬_fast_object); - __ CheckFastSmiOnlyElements(ebx, &call_builtin); + __ CheckFastSmiElements(ebx, &call_builtin); // edi: elements array // edx: receiver // ebx: map - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + Label try_holey_map; + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, ebx, edi, + &try_holey_map); + + ElementsTransitionGenerator:: + GenerateMapChangeElementsTransition(masm()); + // Restore edi. + __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset)); + __ jmp(&fast_object); + + __ bind(&try_holey_map); + __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS, + FAST_HOLEY_ELEMENTS, + ebx, + edi, &call_builtin); - ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm()); + ElementsTransitionGenerator:: + GenerateMapChangeElementsTransition(masm()); // Restore edi. __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset)); __ bind(&fast_object); @@ -3818,7 +3833,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // Check that the key is a smi or a heap number convertible to a smi. GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic); - if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + if (IsFastSmiElementsKind(elements_kind)) { __ JumpIfNotSmi(eax, &transition_elements_kind); } @@ -3843,7 +3858,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ j(not_equal, &miss_force_generic); __ bind(&finish_store); - if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + if (IsFastSmiElementsKind(elements_kind)) { // ecx is a smi, use times_half_pointer_size instead of // times_pointer_size __ mov(FieldOperand(edi, @@ -3851,7 +3866,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( times_half_pointer_size, FixedArray::kHeaderSize), eax); } else { - ASSERT(elements_kind == FAST_ELEMENTS); + ASSERT(IsFastObjectElementsKind(elements_kind)); // Do the store and update the write barrier. 
// ecx is a smi, use times_half_pointer_size instead of // times_pointer_size diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc index 9772b94..134ef8b 100644 --- a/deps/v8/src/ic.cc +++ b/deps/v8/src/ic.cc @@ -1644,8 +1644,7 @@ Handle KeyedIC::ComputeMonomorphicStubWithoutMapCheck( return string_stub(); } else { ASSERT(receiver_map->has_dictionary_elements() || - receiver_map->has_fast_elements() || - receiver_map->has_fast_smi_only_elements() || + receiver_map->has_fast_smi_or_object_elements() || receiver_map->has_fast_double_elements() || receiver_map->has_external_array_elements()); bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE; @@ -1660,8 +1659,7 @@ Handle KeyedIC::ComputeMonomorphicStub(Handle receiver, StubKind stub_kind, StrictModeFlag strict_mode, Handle generic_stub) { - if (receiver->HasFastElements() || - receiver->HasFastSmiOnlyElements() || + if (receiver->HasFastSmiOrObjectElements() || receiver->HasExternalArrayElements() || receiver->HasFastDoubleElements() || receiver->HasDictionaryElements()) { @@ -1681,15 +1679,26 @@ Handle KeyedIC::ComputeTransitionedMap(Handle receiver, case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT: case KeyedIC::STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT: return JSObject::GetElementsTransitionMap(receiver, FAST_ELEMENTS); - break; case KeyedIC::STORE_TRANSITION_SMI_TO_DOUBLE: case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE: return JSObject::GetElementsTransitionMap(receiver, FAST_DOUBLE_ELEMENTS); - break; - default: + case KeyedIC::STORE_TRANSITION_HOLEY_SMI_TO_OBJECT: + case KeyedIC::STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT: + case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT: + case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT: + return JSObject::GetElementsTransitionMap(receiver, + FAST_HOLEY_ELEMENTS); + case KeyedIC::STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE: + case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE: + return JSObject::GetElementsTransitionMap(receiver, + FAST_HOLEY_DOUBLE_ELEMENTS); + case KeyedIC::LOAD: + case KeyedIC::STORE_NO_TRANSITION: + case KeyedIC::STORE_AND_GROW_NO_TRANSITION: UNREACHABLE(); - return Handle::null(); + break; } + return Handle::null(); } @@ -1749,30 +1758,54 @@ KeyedIC::StubKind KeyedStoreIC::GetStubKind(Handle receiver, if (allow_growth) { // Handle growing array in stub if necessary. - if (receiver->HasFastSmiOnlyElements()) { + if (receiver->HasFastSmiElements()) { if (value->IsHeapNumber()) { - return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE; + if (receiver->HasFastHoleyElements()) { + return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE; + } else { + return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE; + } } if (value->IsHeapObject()) { - return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT; + if (receiver->HasFastHoleyElements()) { + return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT; + } else { + return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT; + } } } else if (receiver->HasFastDoubleElements()) { if (!value->IsSmi() && !value->IsHeapNumber()) { - return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT; + if (receiver->HasFastHoleyElements()) { + return STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT; + } else { + return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT; + } } } return STORE_AND_GROW_NO_TRANSITION; } else { // Handle only in-bounds elements accesses. 
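The KeyedStoreIC::GetStubKind branch that follows picks a stub from the stored value's type and the receiver's holey-ness. Distilled for the in-bounds, smi-elements case; this is a sketch, and the growing path is analogous with the STORE_AND_GROW_* kinds from the ic.h hunk later in this section:

// Distillation of the in-bounds path for a receiver with fast smi elements.
// A heap number forces smi -> double, any other heap object forces
// smi -> object, and a smi stores without any transition at all.
StubKind InBoundsSmiReceiverStub(Object* value, bool holey) {
  if (value->IsHeapNumber()) {
    return holey ? STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE
                 : STORE_TRANSITION_SMI_TO_DOUBLE;
  }
  if (value->IsHeapObject()) {
    return holey ? STORE_TRANSITION_HOLEY_SMI_TO_OBJECT
                 : STORE_TRANSITION_SMI_TO_OBJECT;
  }
  return STORE_NO_TRANSITION;
}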
- if (receiver->HasFastSmiOnlyElements()) { + if (receiver->HasFastSmiElements()) { if (value->IsHeapNumber()) { - return STORE_TRANSITION_SMI_TO_DOUBLE; + if (receiver->HasFastHoleyElements()) { + return STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE; + } else { + return STORE_TRANSITION_SMI_TO_DOUBLE; + } } else if (value->IsHeapObject()) { - return STORE_TRANSITION_SMI_TO_OBJECT; + if (receiver->HasFastHoleyElements()) { + return STORE_TRANSITION_HOLEY_SMI_TO_OBJECT; + } else { + return STORE_TRANSITION_SMI_TO_OBJECT; + } } } else if (receiver->HasFastDoubleElements()) { if (!value->IsSmi() && !value->IsHeapNumber()) { - return STORE_TRANSITION_DOUBLE_TO_OBJECT; + if (receiver->HasFastHoleyElements()) { + return STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT; + } else { + return STORE_TRANSITION_DOUBLE_TO_OBJECT; + } } } return STORE_NO_TRANSITION; diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h index 3b44abf..c1b9549 100644 --- a/deps/v8/src/ic.h +++ b/deps/v8/src/ic.h @@ -378,10 +378,16 @@ class KeyedIC: public IC { STORE_TRANSITION_SMI_TO_OBJECT, STORE_TRANSITION_SMI_TO_DOUBLE, STORE_TRANSITION_DOUBLE_TO_OBJECT, + STORE_TRANSITION_HOLEY_SMI_TO_OBJECT, + STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE, + STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT, STORE_AND_GROW_NO_TRANSITION, STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT, STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE, - STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT + STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT, + STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT, + STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE, + STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT }; static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION - diff --git a/deps/v8/src/incremental-marking-inl.h b/deps/v8/src/incremental-marking-inl.h index 5ce003f..2dae6f2 100644 --- a/deps/v8/src/incremental-marking-inl.h +++ b/deps/v8/src/incremental-marking-inl.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -118,13 +118,29 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj, void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) { - WhiteToGrey(obj, mark_bit); + Marking::WhiteToGrey(mark_bit); marking_deque_.PushGrey(obj); } -void IncrementalMarking::WhiteToGrey(HeapObject* obj, MarkBit mark_bit) { - Marking::WhiteToGrey(mark_bit); +bool IncrementalMarking::MarkObjectAndPush(HeapObject* obj) { + MarkBit mark_bit = Marking::MarkBitFrom(obj); + if (!mark_bit.Get()) { + WhiteToGreyAndPush(obj, mark_bit); + return true; + } + return false; +} + + +bool IncrementalMarking::MarkObjectWithoutPush(HeapObject* obj) { + MarkBit mark_bit = Marking::MarkBitFrom(obj); + if (!mark_bit.Get()) { + mark_bit.Set(); + MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size()); + return true; + } + return false; } diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc index 5b58c9d..94afffa 100644 --- a/deps/v8/src/incremental-marking.cc +++ b/deps/v8/src/incremental-marking.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -42,6 +42,7 @@ IncrementalMarking::IncrementalMarking(Heap* heap) state_(STOPPED), marking_deque_memory_(NULL), marking_deque_memory_committed_(false), + marker_(this, heap->mark_compact_collector()), steps_count_(0), steps_took_(0), longest_step_(0.0), @@ -663,6 +664,22 @@ void IncrementalMarking::Hurry() { } else if (map == global_context_map) { // Global contexts have weak fields. VisitGlobalContext(Context::cast(obj), &marking_visitor); + } else if (map->instance_type() == MAP_TYPE) { + Map* map = Map::cast(obj); + heap_->ClearCacheOnMap(map); + + // When map collection is enabled we have to mark through map's + // transitions and back pointers in a special way to make these links + // weak. Only maps for subclasses of JSReceiver can have transitions. + STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); + if (FLAG_collect_maps && + map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { + marker_.MarkMapContents(map); + } else { + marking_visitor.VisitPointers( + HeapObject::RawField(map, Map::kPointerFieldsBeginOffset), + HeapObject::RawField(map, Map::kPointerFieldsEndOffset)); + } } else { obj->Iterate(&marking_visitor); } @@ -807,12 +824,6 @@ void IncrementalMarking::Step(intptr_t allocated_bytes, Map* map = obj->map(); if (map == filler_map) continue; - if (obj->IsMap()) { - Map* map = Map::cast(obj); - heap_->ClearCacheOnMap(map); - } - - int size = obj->SizeFromMap(map); bytes_to_process -= size; MarkBit map_mark_bit = Marking::MarkBitFrom(map); @@ -830,6 +841,22 @@ void IncrementalMarking::Step(intptr_t allocated_bytes, MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache()); VisitGlobalContext(ctx, &marking_visitor); + } else if (map->instance_type() == MAP_TYPE) { + Map* map = Map::cast(obj); + heap_->ClearCacheOnMap(map); + + // When map collection is enabled we have to mark through map's + // transitions and back pointers in a special way to make these links + // weak. Only maps for subclasses of JSReceiver can have transitions. + STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); + if (FLAG_collect_maps && + map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { + marker_.MarkMapContents(map); + } else { + marking_visitor.VisitPointers( + HeapObject::RawField(map, Map::kPointerFieldsBeginOffset), + HeapObject::RawField(map, Map::kPointerFieldsEndOffset)); + } } else if (map->instance_type() == JS_FUNCTION_TYPE) { marking_visitor.VisitPointers( HeapObject::RawField(obj, JSFunction::kPropertiesOffset), diff --git a/deps/v8/src/incremental-marking.h b/deps/v8/src/incremental-marking.h index 8cbe6c1..39e8dae 100644 --- a/deps/v8/src/incremental-marking.h +++ b/deps/v8/src/incremental-marking.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -154,8 +154,6 @@ class IncrementalMarking { inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit); - inline void WhiteToGrey(HeapObject* obj, MarkBit mark_bit); - // Does white->black or keeps gray or black color. Returns true if converting // white to black. inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) { @@ -169,6 +167,16 @@ class IncrementalMarking { return true; } + // Marks the object grey and pushes it on the marking stack. 
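The two IncrementalMarking helpers being declared here differ only in the color an unmarked (white) object receives: MarkObjectAndPush greys it and queues it for a later scan, while MarkObjectWithoutPush blackens it immediately and only records its live bytes. A minimal tri-color sketch, with invented Obj/Marking types standing in for heap objects and per-page mark bits:

#include <cassert>
#include <deque>

// Minimal tri-color model (illustrative; real V8 keeps colors in mark bits
// on the object's page, not in the object itself).
enum Color { WHITE, GREY, BLACK };
struct Obj { Color color = WHITE; int size = 16; };

struct Marking {
  std::deque<Obj*> deque;   // grey objects awaiting a scan
  long live_bytes = 0;

  // White -> grey, push for later scanning. True if marking was needed.
  bool MarkObjectAndPush(Obj* o) {
    if (o->color != WHITE) return false;
    o->color = GREY;
    deque.push_back(o);
    return true;
  }

  // White -> black directly; the object's slots will not be visited again,
  // so this is only safe when its contents are handled specially elsewhere.
  bool MarkObjectWithoutPush(Obj* o) {
    if (o->color != WHITE) return false;
    o->color = BLACK;
    live_bytes += o->size;   // mirrors IncrementLiveBytesFromGC
    return true;
  }
};

int main() {
  Marking m; Obj a, b;
  assert(m.MarkObjectAndPush(&a) && !m.MarkObjectAndPush(&a));
  assert(m.MarkObjectWithoutPush(&b) && m.deque.size() == 1);
}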
+ // Returns true if object needed marking and false otherwise. + // This is for incremental marking only. + INLINE(bool MarkObjectAndPush(HeapObject* obj)); + + // Marks the object black without pushing it on the marking stack. + // Returns true if object needed marking and false otherwise. + // This is for incremental marking only. + INLINE(bool MarkObjectWithoutPush(HeapObject* obj)); + inline int steps_count() { return steps_count_; } @@ -260,6 +268,7 @@ class IncrementalMarking { VirtualMemory* marking_deque_memory_; bool marking_deque_memory_committed_; MarkingDeque marking_deque_; + Marker marker_; int steps_count_; double steps_took_; diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h index f1c9b3c..f51b4e1 100644 --- a/deps/v8/src/isolate.h +++ b/deps/v8/src/isolate.h @@ -965,7 +965,7 @@ class Isolate { // SerializerDeserializer state. static const int kPartialSnapshotCacheCapacity = 1400; - static const int kJSRegexpStaticOffsetsVectorSize = 50; + static const int kJSRegexpStaticOffsetsVectorSize = 128; Address external_callback() { return thread_local_top_.external_callback_; diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc index 3455abc..c5e73ed 100644 --- a/deps/v8/src/jsregexp.cc +++ b/deps/v8/src/jsregexp.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -324,7 +324,7 @@ Handle RegExpImpl::AtomExec(Handle re, index))); if (index == -1) return isolate->factory()->null_value(); } - ASSERT(last_match_info->HasFastElements()); + ASSERT(last_match_info->HasFastObjectElements()); { NoHandleAllocation no_handles; @@ -429,6 +429,7 @@ bool RegExpImpl::CompileIrregexp(Handle re, RegExpEngine::CompilationResult result = RegExpEngine::Compile(&compile_data, flags.is_ignore_case(), + flags.is_global(), flags.is_multiline(), pattern, sample_subject, @@ -515,7 +516,23 @@ int RegExpImpl::IrregexpPrepare(Handle regexp, } -RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce( +int RegExpImpl::GlobalOffsetsVectorSize(Handle regexp, + int registers_per_match, + int* max_matches) { +#ifdef V8_INTERPRETED_REGEXP + // Global loop in interpreted regexp is not implemented. Therefore we choose + // the size of the offsets vector so that it can only store one match. 
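The sizing rule that the new GlobalOffsetsVectorSize helper implements (its body continues just below) is plain arithmetic on non-interpreted builds: the vector is at least the static size, and one native call can return size / registers_per_match matches. In Irregexp a match consumes two registers for the match bounds plus two per capture group. A hedged sketch with a worked example, reusing the 128 constant introduced in isolate.h above:

#include <algorithm>
#include <cstdio>

const int kStaticOffsetsVectorSize = 128;  // was 50 before this patch

// Mirrors RegExpImpl::GlobalOffsetsVectorSize on non-interpreted builds.
int GlobalOffsetsVectorSize(int registers_per_match, int* max_matches) {
  int size = std::max(registers_per_match, kStaticOffsetsVectorSize);
  *max_matches = size / registers_per_match;
  return size;
}

int main() {
  // A pattern with two capture groups needs (2 + 1) * 2 = 6 registers per
  // match: start/end of the whole match plus start/end of each capture.
  int max_matches;
  int size = GlobalOffsetsVectorSize(6, &max_matches);
  std::printf("size=%d max_matches=%d\n", size, max_matches);  // 128, 21
}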
+ *max_matches = 1; + return registers_per_match; +#else // V8_INTERPRETED_REGEXP + int size = Max(registers_per_match, OffsetsVector::kStaticOffsetsVectorSize); + *max_matches = size / registers_per_match; + return size; +#endif // V8_INTERPRETED_REGEXP +} + + +int RegExpImpl::IrregexpExecRaw( Handle regexp, Handle subject, int index, @@ -617,7 +634,7 @@ Handle RegExpImpl::IrregexpExec(Handle jsregexp, OffsetsVector registers(required_registers, isolate); - IrregexpResult res = RegExpImpl::IrregexpExecOnce( + int res = RegExpImpl::IrregexpExecRaw( jsregexp, subject, previous_index, Vector(registers.vector(), registers.length())); if (res == RE_SUCCESS) { @@ -5780,6 +5797,7 @@ void DispatchTableConstructor::VisitAction(ActionNode* that) { RegExpEngine::CompilationResult RegExpEngine::Compile( RegExpCompileData* data, bool ignore_case, + bool is_global, bool is_multiline, Handle pattern, Handle sample_subject, @@ -5883,6 +5901,8 @@ RegExpEngine::CompilationResult RegExpEngine::Compile( macro_assembler.SetCurrentPositionFromEnd(max_length); } + macro_assembler.set_global(is_global); + return compiler.Assemble(¯o_assembler, node, data->capture_count, diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h index 20313ca..2e90e9a 100644 --- a/deps/v8/src/jsregexp.h +++ b/deps/v8/src/jsregexp.h @@ -109,16 +109,22 @@ class RegExpImpl { static int IrregexpPrepare(Handle regexp, Handle subject); - // Execute a regular expression once on the subject, starting from - // character "index". - // If successful, returns RE_SUCCESS and set the capture positions - // in the first registers. + // Calculate the size of offsets vector for the case of global regexp + // and the number of matches this vector is able to store. + static int GlobalOffsetsVectorSize(Handle regexp, + int registers_per_match, + int* max_matches); + + // Execute a regular expression on the subject, starting from index. + // If matching succeeds, return the number of matches. This can be larger + // than one in the case of global regular expressions. + // The captures and subcaptures are stored into the registers vector. // If matching fails, returns RE_FAILURE. // If execution fails, sets a pending exception and returns RE_EXCEPTION. - static IrregexpResult IrregexpExecOnce(Handle regexp, - Handle subject, - int index, - Vector registers); + static int IrregexpExecRaw(Handle regexp, + Handle subject, + int index, + Vector registers); // Execute an Irregexp bytecode pattern. // On a successful match, the result is a JSArray containing @@ -1545,6 +1551,7 @@ class RegExpEngine: public AllStatic { static CompilationResult Compile(RegExpCompileData* input, bool ignore_case, + bool global, bool multiline, Handle pattern, Handle sample_subject, @@ -1573,7 +1580,8 @@ class OffsetsVector { inline int* vector() { return vector_; } inline int length() { return offsets_vector_length_; } - static const int kStaticOffsetsVectorSize = 50; + static const int kStaticOffsetsVectorSize = + Isolate::kJSRegexpStaticOffsetsVectorSize; private: static Address static_offsets_vector_address(Isolate* isolate) { diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc index c41cce8..4ee2a7a 100644 --- a/deps/v8/src/lithium.cc +++ b/deps/v8/src/lithium.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
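Since IrregexpExecRaw can now report several matches per call, a caller conceptually walks the flat registers vector in strides of registers_per_match, where slots 0 and 1 of each stride bound the whole match and later pairs bound the captures (-1 marking a capture that did not participate). That consumption pattern is sketched below with invented names; it is not the actual runtime code:

#include <cstdio>
#include <vector>

// Illustrative: process num_matches matches packed into a flat registers
// vector, registers_per_match ints each.
void ProcessMatches(const std::vector<int>& registers,
                    int registers_per_match, int num_matches) {
  for (int m = 0; m < num_matches; m++) {
    const int* regs = &registers[m * registers_per_match];
    std::printf("match %d: [%d, %d)\n", m, regs[0], regs[1]);
    for (int c = 2; c + 1 < registers_per_match; c += 2) {
      if (regs[c] >= 0) {  // -1 marks a capture that did not participate
        std::printf("  capture: [%d, %d)\n", regs[c], regs[c + 1]);
      }
    }
  }
}

int main() {
  // Two matches of a one-capture pattern (4 registers per match).
  std::vector<int> registers = {0, 3, 1, 2,   5, 8, -1, -1};
  ProcessMatches(registers, 4, 2);
}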
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -225,9 +225,12 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) { return 2; case EXTERNAL_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: return 3; - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: return kPointerSizeLog2; diff --git a/deps/v8/src/mark-compact-inl.h b/deps/v8/src/mark-compact-inl.h index 43f6b89..2f7e31f 100644 --- a/deps/v8/src/mark-compact-inl.h +++ b/deps/v8/src/mark-compact-inl.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -52,6 +52,15 @@ void MarkCompactCollector::SetFlags(int flags) { } +bool MarkCompactCollector::MarkObjectAndPush(HeapObject* obj) { + if (MarkObjectWithoutPush(obj)) { + marking_deque_.PushBlack(obj); + return true; + } + return false; +} + + void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) { ASSERT(Marking::MarkBitFrom(obj) == mark_bit); if (!mark_bit.Get()) { @@ -62,16 +71,13 @@ void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) { } -bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* object) { - MarkBit mark = Marking::MarkBitFrom(object); - bool old_mark = mark.Get(); - if (!old_mark) SetMark(object, mark); - return old_mark; -} - - -void MarkCompactCollector::MarkObjectAndPush(HeapObject* object) { - if (!MarkObjectWithoutPush(object)) marking_deque_.PushBlack(object); +bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* obj) { + MarkBit mark_bit = Marking::MarkBitFrom(obj); + if (!mark_bit.Get()) { + SetMark(obj, mark_bit); + return true; + } + return false; } diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc index 0aa1192..6954268 100644 --- a/deps/v8/src/mark-compact.cc +++ b/deps/v8/src/mark-compact.cc @@ -64,13 +64,13 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT abort_incremental_marking_(false), compacting_(false), was_marked_incrementally_(false), - collect_maps_(FLAG_collect_maps), flush_monomorphic_ics_(false), tracer_(NULL), migration_slots_buffer_(NULL), heap_(NULL), code_flusher_(NULL), - encountered_weak_maps_(NULL) { } + encountered_weak_maps_(NULL), + marker_(this, this) { } #ifdef DEBUG @@ -282,7 +282,7 @@ void MarkCompactCollector::CollectGarbage() { MarkLiveObjects(); ASSERT(heap_->incremental_marking()->IsStopped()); - if (collect_maps_) ClearNonLiveTransitions(); + if (FLAG_collect_maps) ClearNonLiveTransitions(); ClearWeakMaps(); @@ -294,7 +294,7 @@ void MarkCompactCollector::CollectGarbage() { SweepSpaces(); - if (!collect_maps_) ReattachInitialMaps(); + if (!FLAG_collect_maps) ReattachInitialMaps(); Finish(); @@ -658,11 +658,6 @@ void MarkCompactCollector::AbortCompaction() { void MarkCompactCollector::Prepare(GCTracer* tracer) { was_marked_incrementally_ = heap()->incremental_marking()->IsMarking(); - // Disable collection of maps if incremental marking is enabled. - // Map collection algorithm relies on a special map transition tree traversal - // order which is not implemented for incremental marking. 
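The ElementsKindToShiftSize switch extended above maps each elements kind to log2 of its element width, so element addressing compiles down to a shift rather than a multiply. A condensed model follows; the kind names are invented, the real switch covers every ElementsKind, and tagged elements use kPointerSizeLog2, which is 2 on a 32-bit target:

#include <cassert>
#include <cstdint>

enum Kind { BYTE_ELTS, SHORT_ELTS, INT_ELTS, DOUBLE_ELTS, TAGGED_ELTS };

// Condensed model of ElementsKindToShiftSize: log2(bytes per element).
int ShiftSize(Kind kind) {
  switch (kind) {
    case BYTE_ELTS:   return 0;  // 1-byte external elements
    case SHORT_ELTS:  return 1;  // 2-byte external elements
    case INT_ELTS:    return 2;  // 4-byte external elements / floats
    case DOUBLE_ELTS: return 3;  // FAST_DOUBLE and FAST_HOLEY_DOUBLE
    case TAGGED_ELTS: return 2;  // kPointerSizeLog2 on 32-bit
  }
  return -1;
}

// Element address = backing store + (index << shift).
uintptr_t ElementAddress(uintptr_t base, int index, Kind kind) {
  return base + (static_cast<uintptr_t>(index) << ShiftSize(kind));
}

int main() {
  assert(ElementAddress(0x1000, 3, DOUBLE_ELTS) == 0x1000 + 3 * 8);
  assert(ElementAddress(0x1000, 3, TAGGED_ELTS) == 0x1000 + 3 * 4);
}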
- collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_; - // Monomorphic ICs are preserved when possible, but need to be flushed // when they might be keeping a Context alive, or when the heap is about // to be serialized. @@ -1798,11 +1793,11 @@ void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) { heap_->ClearCacheOnMap(map); // When map collection is enabled we have to mark through map's transitions - // in a special way to make transition links weak. - // Only maps for subclasses of JSReceiver can have transitions. + // in a special way to make transition links weak. Only maps for subclasses + // of JSReceiver can have transitions. STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); - if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { - MarkMapContents(map); + if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { + marker_.MarkMapContents(map); } else { marking_deque_.PushBlack(map); } @@ -1812,85 +1807,86 @@ void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) { } -void MarkCompactCollector::MarkMapContents(Map* map) { +// Force instantiation of template instances. +template void Marker::MarkMapContents(Map* map); +template void Marker::MarkMapContents(Map* map); + + +template +void Marker::MarkMapContents(Map* map) { // Mark prototype transitions array but don't push it into marking stack. // This will make references from it weak. We will clean dead prototype - // transitions in ClearNonLiveTransitions. But make sure that back pointers - // stored inside prototype transitions arrays are marked. - Object* raw_proto_transitions = map->unchecked_prototype_transitions(); - if (raw_proto_transitions->IsFixedArray()) { - FixedArray* prototype_transitions = FixedArray::cast(raw_proto_transitions); + // transitions in ClearNonLiveTransitions. + Object** proto_trans_slot = + HeapObject::RawField(map, Map::kPrototypeTransitionsOrBackPointerOffset); + HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot); + if (prototype_transitions->IsFixedArray()) { + mark_compact_collector()->RecordSlot(proto_trans_slot, + proto_trans_slot, + prototype_transitions); MarkBit mark = Marking::MarkBitFrom(prototype_transitions); if (!mark.Get()) { mark.Set(); MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(), prototype_transitions->Size()); - MarkObjectAndPush(HeapObject::cast( - prototype_transitions->get(Map::kProtoTransitionBackPointerOffset))); } } - Object** raw_descriptor_array_slot = + // Make sure that the back pointer stored either in the map itself or inside + // its prototype transitions array is marked. Treat pointers in the descriptor + // array as weak and also mark that array to prevent visiting it later. + base_marker()->MarkObjectAndPush(HeapObject::cast(map->GetBackPointer())); + + Object** descriptor_array_slot = HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset); - Object* raw_descriptor_array = *raw_descriptor_array_slot; - if (!raw_descriptor_array->IsSmi()) { - MarkDescriptorArray( - reinterpret_cast(raw_descriptor_array)); + Object* descriptor_array = *descriptor_array_slot; + if (!descriptor_array->IsSmi()) { + MarkDescriptorArray(reinterpret_cast(descriptor_array)); + } + + // Mark the Object* fields of the Map. Since the descriptor array has been + // marked already, it is fine that one of these fields contains a pointer + // to it. But make sure to skip back pointer and prototype transitions. 
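MarkMapContents, shown mid-definition here, walks the Map's pointer fields as raw slots: each slot is recorded for the evacuation phase and marked through, but the iteration range deliberately stops before the prototype-transitions/back-pointer word so those links stay weak and can be dropped by ClearNonLiveTransitions. A rough standalone model of that slot walk, with invented types and the bookkeeping elided:

#include <cstdio>

struct Obj { const char* name; };

void RecordSlot(Obj** slot) { (void)slot; /* evacuation bookkeeping elided */ }
void MarkStrong(Obj* o) { std::printf("strongly marked %s\n", o->name); }

// Iterate a contiguous range of pointer fields, recording and marking each.
// The caller chooses end_slot so the weak word is excluded from the range.
void MarkPointerFields(Obj** start_slot, Obj** end_slot) {
  for (Obj** slot = start_slot; slot < end_slot; slot++) {
    if (*slot == nullptr) continue;   // stands in for non-heap-object values
    RecordSlot(slot);
    MarkStrong(*slot);
  }
}

int main() {
  Obj ctor = {"constructor"}, proto = {"prototype"};
  // The last array entry plays the prototype-transitions/back-pointer word;
  // end_slot stops before it, mirroring the STATIC_ASSERT that this weak
  // field is the final pointer field of the Map.
  Obj* fields[] = {&ctor, &proto, nullptr};
  MarkPointerFields(fields, fields + 2);
}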
+ STATIC_ASSERT(Map::kPointerFieldsEndOffset == + Map::kPrototypeTransitionsOrBackPointerOffset + kPointerSize); + Object** start_slot = HeapObject::RawField( + map, Map::kPointerFieldsBeginOffset); + Object** end_slot = HeapObject::RawField( + map, Map::kPrototypeTransitionsOrBackPointerOffset); + for (Object** slot = start_slot; slot < end_slot; slot++) { + Object* obj = *slot; + if (!obj->NonFailureIsHeapObject()) continue; + mark_compact_collector()->RecordSlot(start_slot, slot, obj); + base_marker()->MarkObjectAndPush(reinterpret_cast(obj)); } - - // Mark the Object* fields of the Map. - // Since the descriptor array has been marked already, it is fine - // that one of these fields contains a pointer to it. - Object** start_slot = HeapObject::RawField(map, - Map::kPointerFieldsBeginOffset); - - Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset); - - StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot); } -void MarkCompactCollector::MarkAccessorPairSlot(HeapObject* accessors, - int offset) { - Object** slot = HeapObject::RawField(accessors, offset); - HeapObject* accessor = HeapObject::cast(*slot); - if (accessor->IsMap()) return; - RecordSlot(slot, slot, accessor); - MarkObjectAndPush(accessor); -} - - -void MarkCompactCollector::MarkDescriptorArray( - DescriptorArray* descriptors) { - MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors); - if (descriptors_mark.Get()) return; +template +void Marker::MarkDescriptorArray(DescriptorArray* descriptors) { // Empty descriptor array is marked as a root before any maps are marked. - ASSERT(descriptors != heap()->empty_descriptor_array()); - SetMark(descriptors, descriptors_mark); + ASSERT(descriptors != descriptors->GetHeap()->empty_descriptor_array()); - FixedArray* contents = reinterpret_cast( + // The DescriptorArray contains a pointer to its contents array, but the + // contents array will be marked black and hence not be visited again. + if (!base_marker()->MarkObjectAndPush(descriptors)) return; + FixedArray* contents = FixedArray::cast( descriptors->get(DescriptorArray::kContentArrayIndex)); - ASSERT(contents->IsHeapObject()); - ASSERT(!IsMarked(contents)); - ASSERT(contents->IsFixedArray()); ASSERT(contents->length() >= 2); - MarkBit contents_mark = Marking::MarkBitFrom(contents); - SetMark(contents, contents_mark); - // Contents contains (value, details) pairs. If the details say that the type - // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, - // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as - // live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and - // CONSTANT_TRANSITION is the value an Object* (a Map*). + ASSERT(Marking::IsWhite(Marking::MarkBitFrom(contents))); + base_marker()->MarkObjectWithoutPush(contents); + + // Contents contains (value, details) pairs. If the descriptor contains a + // transition (value is a Map), we don't mark the value as live. It might + // be set to the NULL_DESCRIPTOR in ClearNonLiveTransitions later. for (int i = 0; i < contents->length(); i += 2) { - // If the pair (value, details) at index i, i+1 is not - // a transition or null descriptor, mark the value. 
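The loop continuing below treats the descriptor contents array as (value, details) pairs: even indices hold the value, odd indices its PropertyDetails, and the details type decides whether the value is marked strongly or left as a weak transition link. A standalone model of that stride-2 walk — each Entry below stands for one pair, and the type names echo the real PropertyType values:

#include <cstdio>
#include <vector>

enum Type { NORMAL, FIELD, CONSTANT_FUNCTION, CALLBACKS,
            MAP_TRANSITION, CONSTANT_TRANSITION };

struct Entry { const char* value; Type type; };  // one (value, details) pair

void WalkDescriptors(const std::vector<Entry>& contents) {
  for (size_t i = 0; i < contents.size(); i++) {
    switch (contents[i].type) {
      case NORMAL:
      case FIELD:
      case CONSTANT_FUNCTION:
      case CALLBACKS:
        std::printf("mark strongly: %s\n", contents[i].value);
        break;
      case MAP_TRANSITION:
      case CONSTANT_TRANSITION:
        // Transition targets are left unmarked here; if nothing else keeps
        // them alive, ClearNonLiveTransitions removes the link later.
        std::printf("leave weak:    %s\n", contents[i].value);
        break;
    }
  }
}

int main() {
  WalkDescriptors({{"getter fn", CALLBACKS}, {"map B", MAP_TRANSITION}});
}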
PropertyDetails details(Smi::cast(contents->get(i + 1))); Object** slot = contents->data_start() + i; if (!(*slot)->IsHeapObject()) continue; HeapObject* value = HeapObject::cast(*slot); - RecordSlot(slot, slot, *slot); + mark_compact_collector()->RecordSlot(slot, slot, *slot); switch (details.type()) { case NORMAL: @@ -1898,21 +1894,22 @@ void MarkCompactCollector::MarkDescriptorArray( case CONSTANT_FUNCTION: case HANDLER: case INTERCEPTOR: - MarkObjectAndPush(value); + base_marker()->MarkObjectAndPush(value); break; case CALLBACKS: if (!value->IsAccessorPair()) { - MarkObjectAndPush(value); - } else if (!MarkObjectWithoutPush(value)) { - MarkAccessorPairSlot(value, AccessorPair::kGetterOffset); - MarkAccessorPairSlot(value, AccessorPair::kSetterOffset); + base_marker()->MarkObjectAndPush(value); + } else if (base_marker()->MarkObjectWithoutPush(value)) { + AccessorPair* accessors = AccessorPair::cast(value); + MarkAccessorPairSlot(accessors, AccessorPair::kGetterOffset); + MarkAccessorPairSlot(accessors, AccessorPair::kSetterOffset); } break; case ELEMENTS_TRANSITION: // For maps with multiple elements transitions, the transition maps are // stored in a FixedArray. Keep the fixed array alive but not the maps // that it refers to. - if (value->IsFixedArray()) MarkObjectWithoutPush(value); + if (value->IsFixedArray()) base_marker()->MarkObjectWithoutPush(value); break; case MAP_TRANSITION: case CONSTANT_TRANSITION: @@ -1920,9 +1917,16 @@ void MarkCompactCollector::MarkDescriptorArray( break; } } - // The DescriptorArray descriptors contains a pointer to its contents array, - // but the contents array is already marked. - marking_deque_.PushBlack(descriptors); +} + + +template +void Marker::MarkAccessorPairSlot(AccessorPair* accessors, int offset) { + Object** slot = HeapObject::RawField(accessors, offset); + HeapObject* accessor = HeapObject::cast(*slot); + if (accessor->IsMap()) return; + mark_compact_collector()->RecordSlot(slot, slot, accessor); + base_marker()->MarkObjectAndPush(accessor); } @@ -2734,7 +2738,9 @@ static void UpdatePointer(HeapObject** p, HeapObject* object) { // We have to zap this pointer, because the store buffer may overflow later, // and then we have to scan the entire heap and we don't want to find // spurious newspace pointers in the old space. - *p = reinterpret_cast(Smi::FromInt(0)); + // TODO(mstarzinger): This was changed to a sentinel value to track down + // rare crashes, change it back to Smi::FromInt(0) later. + *p = reinterpret_cast(Smi::FromInt(0x0f100d00 >> 1)); // flood } } diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h index 6420a21..dbc2869 100644 --- a/deps/v8/src/mark-compact.h +++ b/deps/v8/src/mark-compact.h @@ -42,6 +42,7 @@ typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset); // Forward declarations. class CodeFlusher; class GCTracer; +class MarkCompactCollector; class MarkingVisitor; class RootMarkingVisitor; @@ -166,7 +167,6 @@ class Marking { // ---------------------------------------------------------------------------- // Marking deque for tracing live objects. 
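One detail from the UpdatePointer hunk above: because a smi on 32-bit V8 is tagged by shifting the payload left one bit (the tag bit is 0), Smi::FromInt(0x0f100d00 >> 1) produces the raw word 0x0f100d00 — a pattern that stands out in a crash dump while still reading as a valid smi. A worked check of that tagging arithmetic, assuming the 32-bit smi representation:

#include <cassert>
#include <cstdint>

// 32-bit smi tagging: payload in the upper 31 bits, tag bit 0 == 0.
const int kSmiTagSize = 1;

intptr_t SmiFromInt(int32_t value) {
  return static_cast<intptr_t>(value) << kSmiTagSize;  // like Smi::FromInt
}

int main() {
  // Pre-shifting the sentinel right by one makes the tagged word come out
  // as the intended bit pattern.
  assert(SmiFromInt(0x0f100d00 >> 1) == 0x0f100d00);
  // The low (tag) bit is clear, so the word is still a well-formed smi.
  assert((SmiFromInt(0x0f100d00 >> 1) & 1) == 0);
}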
- class MarkingDeque { public: MarkingDeque() @@ -383,6 +383,34 @@ class SlotsBuffer { }; +// ------------------------------------------------------------------------- +// Marker shared between incremental and non-incremental marking +template class Marker { + public: + Marker(BaseMarker* base_marker, MarkCompactCollector* mark_compact_collector) + : base_marker_(base_marker), + mark_compact_collector_(mark_compact_collector) {} + + // Mark pointers in a Map and its DescriptorArray together, possibly + // treating transitions or back pointers weak. + void MarkMapContents(Map* map); + void MarkDescriptorArray(DescriptorArray* descriptors); + void MarkAccessorPairSlot(AccessorPair* accessors, int offset); + + private: + BaseMarker* base_marker() { + return base_marker_; + } + + MarkCompactCollector* mark_compact_collector() { + return mark_compact_collector_; + } + + BaseMarker* base_marker_; + MarkCompactCollector* mark_compact_collector_; +}; + + // Defined in isolate.h. class ThreadLocalTop; @@ -584,8 +612,6 @@ class MarkCompactCollector { bool was_marked_incrementally_; - bool collect_maps_; - bool flush_monomorphic_ics_; // A pointer to the current stack-allocated GC tracer object during a full @@ -608,12 +634,13 @@ class MarkCompactCollector { // // After: Live objects are marked and non-live objects are unmarked. - friend class RootMarkingVisitor; friend class MarkingVisitor; friend class StaticMarkingVisitor; friend class CodeMarkingVisitor; friend class SharedFunctionInfoMarkingVisitor; + friend class Marker; + friend class Marker; // Mark non-optimize code for functions inlined into the given optimized // code. This will prevent it from being flushed. @@ -631,22 +658,25 @@ class MarkCompactCollector { void AfterMarking(); // Marks the object black and pushes it on the marking stack. - // This is for non-incremental marking. + // Returns true if object needed marking and false otherwise. + // This is for non-incremental marking only. + INLINE(bool MarkObjectAndPush(HeapObject* obj)); + + // Marks the object black and pushes it on the marking stack. + // This is for non-incremental marking only. INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit)); - INLINE(bool MarkObjectWithoutPush(HeapObject* object)); - INLINE(void MarkObjectAndPush(HeapObject* value)); + // Marks the object black without pushing it on the marking stack. + // Returns true if object needed marking and false otherwise. + // This is for non-incremental marking only. + INLINE(bool MarkObjectWithoutPush(HeapObject* obj)); - // Marks the object black. This is for non-incremental marking. + // Marks the object black assuming that it is not yet marked. + // This is for non-incremental marking only. INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit)); void ProcessNewlyMarkedObject(HeapObject* obj); - // Mark a Map and its DescriptorArray together, skipping transitions. - void MarkMapContents(Map* map); - void MarkAccessorPairSlot(HeapObject* accessors, int offset); - void MarkDescriptorArray(DescriptorArray* descriptors); - // Mark the heap roots and all objects reachable from them. 
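The Marker<BaseMarker> template declared above is what lets MarkMapContents and friends be written once and instantiated for both collectors: the base marker supplies the color-and-push primitive (IncrementalMarking greys and queues, MarkCompactCollector blackens), while Marker supplies the traversal. A minimal sketch of the pattern with stand-in types:

#include <cstdio>

struct Obj { const char* name; };

struct FullMarker {          // plays the role of MarkCompactCollector
  bool MarkObjectAndPush(Obj* o) {
    std::printf("black + push %s\n", o->name);
    return true;
  }
};

struct IncrementalMarker {   // plays the role of IncrementalMarking
  bool MarkObjectAndPush(Obj* o) {
    std::printf("grey + push %s\n", o->name);
    return true;
  }
};

// Traversal written once; the marking primitive comes from BaseMarker.
template <class BaseMarker>
class Marker {
 public:
  explicit Marker(BaseMarker* base) : base_(base) {}
  void MarkMapContents(Obj* back_pointer) {
    base_->MarkObjectAndPush(back_pointer);  // real code walks all map fields
  }
 private:
  BaseMarker* base_;
};

// mark-compact.cc forces both instantiations explicitly, as shown above.
int main() {
  Obj bp = {"back pointer"};
  FullMarker full;
  IncrementalMarker inc;
  Marker<FullMarker>(&full).MarkMapContents(&bp);
  Marker<IncrementalMarker>(&inc).MarkMapContents(&bp);
}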
void MarkRoots(RootMarkingVisitor* visitor); @@ -749,6 +779,7 @@ class MarkCompactCollector { MarkingDeque marking_deque_; CodeFlusher* code_flusher_; Object* encountered_weak_maps_; + Marker marker_; List evacuation_candidates_; List invalidated_code_; diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js index f8b5766..2a00ba8 100644 --- a/deps/v8/src/messages.js +++ b/deps/v8/src/messages.js @@ -61,18 +61,21 @@ function FormatString(format, message) { // To check if something is a native error we need to check the -// concrete native error types. It is not enough to check "obj -// instanceof $Error" because user code can replace -// NativeError.prototype.__proto__. User code cannot replace -// NativeError.prototype though and therefore this is a safe test. +// concrete native error types. It is not sufficient to use instanceof +// since it possible to create an object that has Error.prototype on +// its prototype chain. This is the case for DOMException for example. function IsNativeErrorObject(obj) { - return (obj instanceof $Error) || - (obj instanceof $EvalError) || - (obj instanceof $RangeError) || - (obj instanceof $ReferenceError) || - (obj instanceof $SyntaxError) || - (obj instanceof $TypeError) || - (obj instanceof $URIError); + switch (%_ClassOf(obj)) { + case 'Error': + case 'EvalError': + case 'RangeError': + case 'ReferenceError': + case 'SyntaxError': + case 'TypeError': + case 'URIError': + return true; + } + return false; } @@ -745,7 +748,7 @@ function GetPositionInLine(message) { function GetStackTraceLine(recv, fun, pos, isGlobal) { - return FormatSourcePosition(new CallSite(recv, fun, pos)); + return new CallSite(recv, fun, pos).toString(); } // ---------------------------------------------------------------------------- @@ -785,15 +788,7 @@ function CallSiteGetThis() { } function CallSiteGetTypeName() { - var constructor = this.receiver.constructor; - if (!constructor) { - return %_CallFunction(this.receiver, ObjectToString); - } - var constructorName = constructor.name; - if (!constructorName) { - return %_CallFunction(this.receiver, ObjectToString); - } - return constructorName; + return GetTypeName(this, false); } function CallSiteIsToplevel() { @@ -827,8 +822,10 @@ function CallSiteGetFunctionName() { var name = this.fun.name; if (name) { return name; - } else { - return %FunctionGetInferredName(this.fun); + } + name = %FunctionGetInferredName(this.fun); + if (name) { + return name; } // Maybe this is an evaluation? 
var script = %FunctionGetScript(this.fun); @@ -919,6 +916,69 @@ function CallSiteIsConstructor() { return this.fun === constructor; } +function CallSiteToString() { + var fileName; + var fileLocation = ""; + if (this.isNative()) { + fileLocation = "native"; + } else if (this.isEval()) { + fileName = this.getScriptNameOrSourceURL(); + if (!fileName) { + fileLocation = this.getEvalOrigin(); + } + } else { + fileName = this.getFileName(); + } + + if (fileName) { + fileLocation += fileName; + var lineNumber = this.getLineNumber(); + if (lineNumber != null) { + fileLocation += ":" + lineNumber; + var columnNumber = this.getColumnNumber(); + if (columnNumber) { + fileLocation += ":" + columnNumber; + } + } + } + + if (!fileLocation) { + fileLocation = "unknown source"; + } + var line = ""; + var functionName = this.getFunctionName(); + var addSuffix = true; + var isConstructor = this.isConstructor(); + var isMethodCall = !(this.isToplevel() || isConstructor); + if (isMethodCall) { + var typeName = GetTypeName(this, true); + var methodName = this.getMethodName(); + if (functionName) { + if (typeName && functionName.indexOf(typeName) != 0) { + line += typeName + "."; + } + line += functionName; + if (methodName && functionName.lastIndexOf("." + methodName) != + functionName.length - methodName.length - 1) { + line += " [as " + methodName + "]"; + } + } else { + line += typeName + "." + (methodName || ""); + } + } else if (isConstructor) { + line += "new " + (functionName || ""); + } else if (functionName) { + line += functionName; + } else { + line += fileLocation; + addSuffix = false; + } + if (addSuffix) { + line += " (" + fileLocation + ")"; + } + return line; +} + SetUpLockedPrototype(CallSite, $Array("receiver", "fun", "pos"), $Array( "getThis", CallSiteGetThis, "getTypeName", CallSiteGetTypeName, @@ -934,7 +994,8 @@ SetUpLockedPrototype(CallSite, $Array("receiver", "fun", "pos"), $Array( "getColumnNumber", CallSiteGetColumnNumber, "isNative", CallSiteIsNative, "getPosition", CallSiteGetPosition, - "isConstructor", CallSiteIsConstructor + "isConstructor", CallSiteIsConstructor, + "toString", CallSiteToString )); @@ -976,65 +1037,6 @@ function FormatEvalOrigin(script) { return eval_origin; } -function FormatSourcePosition(frame) { - var fileName; - var fileLocation = ""; - if (frame.isNative()) { - fileLocation = "native"; - } else if (frame.isEval()) { - fileName = frame.getScriptNameOrSourceURL(); - if (!fileName) { - fileLocation = frame.getEvalOrigin(); - } - } else { - fileName = frame.getFileName(); - } - - if (fileName) { - fileLocation += fileName; - var lineNumber = frame.getLineNumber(); - if (lineNumber != null) { - fileLocation += ":" + lineNumber; - var columnNumber = frame.getColumnNumber(); - if (columnNumber) { - fileLocation += ":" + columnNumber; - } - } - } - - if (!fileLocation) { - fileLocation = "unknown source"; - } - var line = ""; - var functionName = frame.getFunction().name; - var addPrefix = true; - var isConstructor = frame.isConstructor(); - var isMethodCall = !(frame.isToplevel() || isConstructor); - if (isMethodCall) { - var methodName = frame.getMethodName(); - line += frame.getTypeName() + "."; - if (functionName) { - line += functionName; - if (methodName && (methodName != functionName)) { - line += " [as " + methodName + "]"; - } - } else { - line += methodName || ""; - } - } else if (isConstructor) { - line += "new " + (functionName || ""); - } else if (functionName) { - line += functionName; - } else { - line += fileLocation; - addPrefix = false; - } - if 
(addPrefix) { - line += " (" + fileLocation + ")"; - } - return line; -} - function FormatStackTrace(error, frames) { var lines = []; try { @@ -1050,7 +1052,7 @@ function FormatStackTrace(error, frames) { var frame = frames[i]; var line; try { - line = FormatSourcePosition(frame); + line = frame.toString(); } catch (e) { try { line = ""; @@ -1081,6 +1083,19 @@ function FormatRawStackTrace(error, raw_stack) { } } +function GetTypeName(obj, requireConstructor) { + var constructor = obj.receiver.constructor; + if (!constructor) { + return requireConstructor ? null : + %_CallFunction(obj.receiver, ObjectToString); + } + var constructorName = constructor.name; + if (!constructorName) { + return requireConstructor ? null : + %_CallFunction(obj.receiver, ObjectToString); + } + return constructorName; +} function captureStackTrace(obj, cons_opt) { var stackTraceLimit = $Error.stackTraceLimit; diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc index eeb84c3..5a2074e 100644 --- a/deps/v8/src/mips/builtins-mips.cc +++ b/deps/v8/src/mips/builtins-mips.cc @@ -118,7 +118,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm, Label* gc_required) { const int initial_capacity = JSArray::kPreallocatedArrayElements; STATIC_ASSERT(initial_capacity >= 0); - __ LoadInitialArrayMap(array_function, scratch2, scratch1); + __ LoadInitialArrayMap(array_function, scratch2, scratch1, false); // Allocate the JSArray object together with space for a fixed array with the // requested elements. @@ -214,7 +214,8 @@ static void AllocateJSArray(MacroAssembler* masm, bool fill_with_hole, Label* gc_required) { // Load the initial map from the array function. - __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage); + __ LoadInitialArrayMap(array_function, scratch2, + elements_array_storage, fill_with_hole); if (FLAG_debug_code) { // Assert that array size is not zero. __ Assert( @@ -449,10 +450,10 @@ static void ArrayNativeCode(MacroAssembler* masm, __ Branch(call_generic_code); __ bind(¬_double); - // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS. + // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS. // a3: JSArray __ lw(a2, FieldMemOperand(a3, HeapObject::kMapOffset)); - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, a2, t5, diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc index f3dd95b..a464348 100644 --- a/deps/v8/src/mips/code-stubs-mips.cc +++ b/deps/v8/src/mips/code-stubs-mips.cc @@ -5043,7 +5043,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { 1, a0, a2); // Isolates: note we add an additional parameter here (isolate pointer). - const int kRegExpExecuteArguments = 8; + const int kRegExpExecuteArguments = 9; const int kParameterRegisters = 4; __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); @@ -5054,27 +5054,33 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // allocating space for the c argument slots, we don't need to calculate // that into the argument positions on the stack. This is how the stack will // look (sp meaning the value of sp at this moment): + // [sp + 5] - Argument 9 // [sp + 4] - Argument 8 // [sp + 3] - Argument 7 // [sp + 2] - Argument 6 // [sp + 1] - Argument 5 // [sp + 0] - saved ra - // Argument 8: Pass current isolate address. + // Argument 9: Pass current isolate address. // CFunctionArgumentOperand handles MIPS stack argument slots. 
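CallSiteToString, added in the messages.js hunk above, is the old FormatSourcePosition logic moved onto the CallSite prototype with two refinements: the type name is only prefixed when the function name does not already begin with it, and the "[as method]" alias is only appended when the function name does not already end in ".method". The real code is the JavaScript shown above; the C++ below is only an illustrative rendering of the method-call shape:

#include <cstdio>
#include <string>

// Assemble "Type.func [as method] (file:line:col)" for a method-call frame.
std::string FormatMethodCall(const std::string& type_name,
                             const std::string& function_name,
                             const std::string& method_name,
                             const std::string& file_location) {
  std::string line;
  if (!function_name.empty()) {
    // Prefix the type only if the function name does not already carry it.
    if (!type_name.empty() && function_name.find(type_name) != 0) {
      line += type_name + ".";
    }
    line += function_name;
    // Append "[as m]" only if the name does not already end in ".m".
    std::string suffix = "." + method_name;
    if (!method_name.empty() &&
        (function_name.size() < suffix.size() ||
         function_name.compare(function_name.size() - suffix.size(),
                               suffix.size(), suffix) != 0)) {
      line += " [as " + method_name + "]";
    }
  } else {
    line += type_name + "." + method_name;
  }
  return line + " (" + file_location + ")";
}

int main() {
  // Prints "Foo.bar (app.js:1:1)": no duplicate prefix, no "[as bar]".
  std::printf("%s\n",
              FormatMethodCall("Foo", "Foo.bar", "bar", "app.js:1:1").c_str());
}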
__ li(a0, Operand(ExternalReference::isolate_address())); - __ sw(a0, MemOperand(sp, 4 * kPointerSize)); + __ sw(a0, MemOperand(sp, 5 * kPointerSize)); - // Argument 7: Indicate that this is a direct call from JavaScript. + // Argument 8: Indicate that this is a direct call from JavaScript. __ li(a0, Operand(1)); - __ sw(a0, MemOperand(sp, 3 * kPointerSize)); + __ sw(a0, MemOperand(sp, 4 * kPointerSize)); - // Argument 6: Start (high end) of backtracking stack memory area. + // Argument 7: Start (high end) of backtracking stack memory area. __ li(a0, Operand(address_of_regexp_stack_memory_address)); __ lw(a0, MemOperand(a0, 0)); __ li(a2, Operand(address_of_regexp_stack_memory_size)); __ lw(a2, MemOperand(a2, 0)); __ addu(a0, a0, a2); + __ sw(a0, MemOperand(sp, 3 * kPointerSize)); + + // Argument 6: Set the number of capture registers to zero to force global + // regexps to behave as non-global. This does not affect non-global regexps. + __ mov(a0, zero_reg); __ sw(a0, MemOperand(sp, 2 * kPointerSize)); // Argument 5: static offsets vector buffer. @@ -5125,7 +5131,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Check the result. Label success; - __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS)); + __ Branch(&success, eq, v0, Operand(1)); + // We expect exactly one result since we force the called regexp to behave + // as non-global. Label failure; __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE)); // If not exception it can only be retry. Handle that in the runtime system. @@ -7362,8 +7370,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { // KeyedStoreStubCompiler::GenerateStoreFastElement. { REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET }, { REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET }, - // ElementsTransitionGenerator::GenerateSmiOnlyToObject - // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble + // ElementsTransitionGenerator::GenerateMapChangeElementTransition + // and ElementsTransitionGenerator::GenerateSmiToDouble // and ElementsTransitionGenerator::GenerateDoubleToObject { REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET }, { REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET }, @@ -7629,9 +7637,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { Label fast_elements; __ CheckFastElements(a2, t1, &double_elements); - // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS + // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements __ JumpIfSmi(a0, &smi_element); - __ CheckFastSmiOnlyElements(a2, t1, &fast_elements); + __ CheckFastSmiElements(a2, t1, &fast_elements); // Store into the array literal requires a elements transition. Call into // the runtime. @@ -7643,7 +7651,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { __ Push(t1, t0); __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); - // Array literal has ElementsKind of FAST_ELEMENTS and value is an object. + // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. __ bind(&fast_elements); __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset)); __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize); @@ -7656,8 +7664,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { __ Ret(USE_DELAY_SLOT); __ mov(v0, a0); - // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or - // FAST_ELEMENTS, and value is Smi. + // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, + // and value is Smi. 
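For the stack-slot renumbering just above: with the first four C arguments in a0-a3 and the saved ra parked at [sp + 0], the stub addresses argument N (N >= 5) at sp + (N - 4) words, so growing the call from 8 to 9 arguments shifts every stack-passed argument up one slot. A quick check of that offset rule — a simplified model of the stub's own frame layout, not the full o32 ABI:

#include <cassert>

const int kPointerSize = 4;  // 32-bit MIPS

// a0-a3 carry arguments 1-4; [sp + 0] holds the saved ra; stack-passed
// arguments start at sp + 1 word.
int StackOffsetOfArgument(int n) {
  assert(n >= 5);  // the first four arguments travel in registers
  return (n - 4) * kPointerSize;
}

int main() {
  assert(StackOffsetOfArgument(5) == 1 * kPointerSize);  // [sp + 1]
  assert(StackOffsetOfArgument(9) == 5 * kPointerSize);  // [sp + 5], isolate
}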
__ bind(&smi_element); __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset)); __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize); @@ -7666,7 +7674,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { __ Ret(USE_DELAY_SLOT); __ mov(v0, a0); - // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS. + // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS. __ bind(&double_elements); __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset)); __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, a2, diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc index 9acccdc..44e0359 100644 --- a/deps/v8/src/mips/codegen-mips.cc +++ b/deps/v8/src/mips/codegen-mips.cc @@ -72,7 +72,7 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { // ------------------------------------------------------------------------- // Code generators -void ElementsTransitionGenerator::GenerateSmiOnlyToObject( +void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( MacroAssembler* masm) { // ----------- S t a t e ------------- // -- a0 : value @@ -95,7 +95,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToObject( } -void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( +void ElementsTransitionGenerator::GenerateSmiToDouble( MacroAssembler* masm, Label* fail) { // ----------- S t a t e ------------- // -- a0 : value diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc index 4ffd0ea..3ed794a 100644 --- a/deps/v8/src/mips/full-codegen-mips.cc +++ b/deps/v8/src/mips/full-codegen-mips.cc @@ -1711,7 +1711,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { ASSERT_EQ(2, constant_elements->length()); ElementsKind constant_elements_kind = static_cast(Smi::cast(constant_elements->get(0))->value()); - bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS; + bool has_fast_elements = + IsFastObjectElementsKind(constant_elements_kind); Handle constant_elements_values( FixedArrayBase::cast(constant_elements->get(1))); @@ -1733,8 +1734,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3); } else { - ASSERT(constant_elements_kind == FAST_ELEMENTS || - constant_elements_kind == FAST_SMI_ONLY_ELEMENTS || + ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) || FLAG_smi_only_arrays); FastCloneShallowArrayStub::Mode mode = has_fast_elements ? FastCloneShallowArrayStub::CLONE_ELEMENTS @@ -1763,7 +1763,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { VisitForAccumulatorValue(subexpr); - if (constant_elements_kind == FAST_ELEMENTS) { + if (IsFastObjectElementsKind(constant_elements_kind)) { int offset = FixedArray::kHeaderSize + (i * kPointerSize); __ lw(t2, MemOperand(sp)); // Copy of array literal. 
__ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset)); @@ -3500,104 +3500,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) { } -void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) { - ZoneList* args = expr->arguments(); - ASSERT(args->length() == 3); - VisitForStackValue(args->at(0)); - VisitForStackValue(args->at(1)); - VisitForStackValue(args->at(2)); - Label done; - Label slow_case; - Register object = a0; - Register index1 = a1; - Register index2 = a2; - Register elements = a3; - Register scratch1 = t0; - Register scratch2 = t1; - - __ lw(object, MemOperand(sp, 2 * kPointerSize)); - // Fetch the map and check if array is in fast case. - // Check that object doesn't require security checks and - // has no indexed interceptor. - __ GetObjectType(object, scratch1, scratch2); - __ Branch(&slow_case, ne, scratch2, Operand(JS_ARRAY_TYPE)); - // Map is now in scratch1. - - __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset)); - __ And(scratch2, scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask)); - __ Branch(&slow_case, ne, scratch2, Operand(zero_reg)); - - // Check the object's elements are in fast case and writable. - __ lw(elements, FieldMemOperand(object, JSObject::kElementsOffset)); - __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ LoadRoot(scratch2, Heap::kFixedArrayMapRootIndex); - __ Branch(&slow_case, ne, scratch1, Operand(scratch2)); - - // Check that both indices are smis. - __ lw(index1, MemOperand(sp, 1 * kPointerSize)); - __ lw(index2, MemOperand(sp, 0)); - __ JumpIfNotBothSmi(index1, index2, &slow_case); - - // Check that both indices are valid. - Label not_hi; - __ lw(scratch1, FieldMemOperand(object, JSArray::kLengthOffset)); - __ Branch(&slow_case, ls, scratch1, Operand(index1)); - __ Branch(¬_hi, NegateCondition(hi), scratch1, Operand(index1)); - __ Branch(&slow_case, ls, scratch1, Operand(index2)); - __ bind(¬_hi); - - // Bring the address of the elements into index1 and index2. - __ Addu(scratch1, elements, - Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ sll(index1, index1, kPointerSizeLog2 - kSmiTagSize); - __ Addu(index1, scratch1, index1); - __ sll(index2, index2, kPointerSizeLog2 - kSmiTagSize); - __ Addu(index2, scratch1, index2); - - // Swap elements. - __ lw(scratch1, MemOperand(index1, 0)); - __ lw(scratch2, MemOperand(index2, 0)); - __ sw(scratch1, MemOperand(index2, 0)); - __ sw(scratch2, MemOperand(index1, 0)); - - Label no_remembered_set; - __ CheckPageFlag(elements, - scratch1, - 1 << MemoryChunk::SCAN_ON_SCAVENGE, - ne, - &no_remembered_set); - // Possible optimization: do a check that both values are Smis - // (or them and test against Smi mask). - - // We are swapping two objects in an array and the incremental marker never - // pauses in the middle of scanning a single object. Therefore the - // incremental marker is not disturbed, so we don't need to call the - // RecordWrite stub that notifies the incremental marker. - __ RememberedSetHelper(elements, - index1, - scratch2, - kDontSaveFPRegs, - MacroAssembler::kFallThroughAtEnd); - __ RememberedSetHelper(elements, - index2, - scratch2, - kDontSaveFPRegs, - MacroAssembler::kFallThroughAtEnd); - - __ bind(&no_remembered_set); - // We are done. Drop elements from the stack, and return undefined. 
- __ Drop(3); - __ LoadRoot(v0, Heap::kUndefinedValueRootIndex); - __ jmp(&done); - - __ bind(&slow_case); - __ CallRuntime(Runtime::kSwapElements, 3); - - __ bind(&done); - context()->Plug(v0); -} - - void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); ASSERT_EQ(2, args->length()); diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc index 964a7e2..5d530d0 100644 --- a/deps/v8/src/mips/ic-mips.cc +++ b/deps/v8/src/mips/ic-mips.cc @@ -1347,34 +1347,35 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); __ Branch(&non_double_value, ne, t0, Operand(at)); - // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS -> - // FAST_DOUBLE_ELEMENTS and complete the store. - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + + // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS + // and complete the store. + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, t0, &slow); ASSERT(receiver_map.is(a3)); // Transition code expects map in a3 - ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow); __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&fast_double_without_map_check); __ bind(&non_double_value); - // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, receiver_map, t0, &slow); ASSERT(receiver_map.is(a3)); // Transition code expects map in a3 - ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm); + ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm); __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&finish_object_store); __ bind(&transition_double_elements); - // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a - // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and - // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS + // Elements are double, but value is an Object that's not a HeapNumber. Make + // sure that the receiver is a Array with Object elements and transition array + // from double elements to Object elements. __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, receiver_map, @@ -1471,7 +1472,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) { // Must return the modified receiver in v0. 
if (!FLAG_trace_elements_transitions) { Label fail; - ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail); __ Ret(USE_DELAY_SLOT); __ mov(v0, a2); __ bind(&fail); diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc index 986921f..5c5c842 100644 --- a/deps/v8/src/mips/lithium-codegen-mips.cc +++ b/deps/v8/src/mips/lithium-codegen-mips.cc @@ -2343,6 +2343,7 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) { Register object = ToRegister(instr->object()); Register result = ToRegister(instr->result()); Register scratch = scratch0(); + int map_count = instr->hydrogen()->types()->length(); bool need_generic = instr->hydrogen()->need_generic(); @@ -2357,8 +2358,8 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) { bool last = (i == map_count - 1); Handle map = instr->hydrogen()->types()->at(i); if (last && !need_generic) { - Handle map = instr->hydrogen()->types()->last(); DeoptimizeIf(ne, instr->environment(), scratch, Operand(map)); + EmitLoadFieldOrConstantFunction(result, object, map, name); } else { Label next; __ Branch(&next, ne, scratch, Operand(map)); @@ -2447,8 +2448,10 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) { __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset)); __ Ext(scratch, scratch, Map::kElementsKindShift, Map::kElementsKindBitCount); - __ Branch(&done, eq, scratch, - Operand(FAST_ELEMENTS)); + __ Branch(&fail, lt, scratch, + Operand(GetInitialFastElementsKind())); + __ Branch(&done, le, scratch, + Operand(TERMINAL_FAST_ELEMENTS_KIND)); __ Branch(&fail, lt, scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND)); __ Branch(&done, le, scratch, @@ -2501,7 +2504,9 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { // Load the result. __ sll(scratch, key, kPointerSizeLog2); // Key indexes words. __ addu(scratch, elements, scratch); - __ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize)); + uint32_t offset = FixedArray::kHeaderSize + + (instr->additional_index() << kPointerSizeLog2); + __ lw(result, FieldMemOperand(scratch, offset)); // Check for the hole value. 
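The additional_index() plumbing running through these lithium hunks folds a compile-time extra index into keyed element addressing so hoisted accesses can share one key register. For a constant key the whole displacement folds into the immediate, as in the DoLoadKeyedFastDoubleElement change above. A sketch of that effective-address arithmetic; the header and tag constants are the usual 32-bit values and are assumptions of this model:

#include <cassert>
#include <cstdint>

const int kPointerSizeLog2 = 2;  // 32-bit target
const int kHeaderSize = 8;       // FixedArray header: map word + length word
const int kHeapObjectTag = 1;    // tagged pointers point one byte past start

// Byte displacement for element (constant_key + additional_index) of a
// tagged-pointer backing store, relative to the tagged elements pointer.
int32_t ConstantKeyDisplacement(int constant_key, int additional_index) {
  return ((constant_key + additional_index) << kPointerSizeLog2) +
         kHeaderSize - kHeapObjectTag;
}

int main() {
  // Key 3 with additional_index 2 addresses element 5.
  assert(ConstantKeyDisplacement(3, 2) == (5 << 2) + 8 - 1);
}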
if (instr->hydrogen()->RequiresHoleCheck()) { @@ -2532,17 +2537,21 @@ void LCodeGen::DoLoadKeyedFastDoubleElement( } if (key_is_constant) { - __ Addu(elements, elements, Operand(constant_key * (1 << shift_size) + - FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + __ Addu(elements, elements, + Operand(((constant_key + instr->additional_index()) << shift_size) + + FixedDoubleArray::kHeaderSize - kHeapObjectTag)); } else { __ sll(scratch, key, shift_size); __ Addu(elements, elements, Operand(scratch)); __ Addu(elements, elements, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) + + (instr->additional_index() << shift_size))); } - __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); - DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32)); + if (instr->hydrogen()->RequiresHoleCheck()) { + __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); + DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32)); + } __ ldc1(result, MemOperand(elements)); } @@ -2564,32 +2573,41 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( key = ToRegister(instr->key()); } int shift_size = ElementsKindToShiftSize(elements_kind); + int additional_offset = instr->additional_index() << shift_size; if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { FPURegister result = ToDoubleRegister(instr->result()); if (key_is_constant) { - __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size)); + __ Addu(scratch0(), external_pointer, constant_key << shift_size); } else { __ sll(scratch0(), key, shift_size); __ Addu(scratch0(), scratch0(), external_pointer); } if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - __ lwc1(result, MemOperand(scratch0())); + __ lwc1(result, MemOperand(scratch0(), additional_offset)); __ cvt_d_s(result, result); } else { // i.e. 
elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ ldc1(result, MemOperand(scratch0())); + __ ldc1(result, MemOperand(scratch0(), additional_offset)); } } else { Register result = ToRegister(instr->result()); Register scratch = scratch0(); + if (instr->additional_index() != 0 && !key_is_constant) { + __ Addu(scratch, key, instr->additional_index()); + } MemOperand mem_operand(zero_reg); if (key_is_constant) { - mem_operand = MemOperand(external_pointer, - constant_key * (1 << shift_size)); + mem_operand = + MemOperand(external_pointer, + (constant_key << shift_size) + additional_offset); } else { - __ sll(scratch, key, shift_size); + if (instr->additional_index() == 0) { + __ sll(scratch, key, shift_size); + } else { + __ sll(scratch, scratch, shift_size); + } __ Addu(scratch, scratch, external_pointer); mem_operand = MemOperand(scratch); } @@ -2622,7 +2640,10 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( case EXTERNAL_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3504,11 +3525,17 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); int offset = - ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize; + (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize + + FixedArray::kHeaderSize; __ sw(value, FieldMemOperand(elements, offset)); } else { __ sll(scratch, key, kPointerSizeLog2); __ addu(scratch, elements, scratch); + if (instr->additional_index() != 0) { + __ Addu(scratch, + scratch, + instr->additional_index() << kPointerSizeLog2); + } __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize)); } @@ -3551,7 +3578,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement( } int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); if (key_is_constant) { - __ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) + + __ Addu(scratch, elements, Operand((constant_key << shift_size) + FixedDoubleArray::kHeaderSize - kHeapObjectTag)); } else { __ sll(scratch, key, shift_size); @@ -3572,7 +3599,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement( } __ bind(¬_nan); - __ sdc1(value, MemOperand(scratch)); + __ sdc1(value, MemOperand(scratch, instr->additional_index() << shift_size)); } @@ -3593,12 +3620,13 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( key = ToRegister(instr->key()); } int shift_size = ElementsKindToShiftSize(elements_kind); + int additional_offset = instr->additional_index() << shift_size; if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { FPURegister value(ToDoubleRegister(instr->value())); if (key_is_constant) { - __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size)); + __ Addu(scratch0(), external_pointer, constant_key << shift_size); } else { __ sll(scratch0(), key, shift_size); __ Addu(scratch0(), scratch0(), external_pointer); @@ -3606,19 +3634,27 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { __ cvt_s_d(double_scratch0(), value); - __ swc1(double_scratch0(), MemOperand(scratch0())); + __ swc1(double_scratch0(), MemOperand(scratch0(), additional_offset)); } else { // i.e. 
elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ sdc1(value, MemOperand(scratch0())); + __ sdc1(value, MemOperand(scratch0(), additional_offset)); } } else { Register value(ToRegister(instr->value())); - MemOperand mem_operand(zero_reg); Register scratch = scratch0(); + if (instr->additional_index() != 0 && !key_is_constant) { + __ Addu(scratch, key, instr->additional_index()); + } + MemOperand mem_operand(zero_reg); if (key_is_constant) { mem_operand = MemOperand(external_pointer, - constant_key * (1 << shift_size)); + ((constant_key + instr->additional_index()) + << shift_size)); } else { - __ sll(scratch, key, shift_size); + if (instr->additional_index() == 0) { + __ sll(scratch, key, shift_size); + } else { + __ sll(scratch, scratch, shift_size); + } __ Addu(scratch, scratch, external_pointer); mem_operand = MemOperand(scratch); } @@ -3640,7 +3676,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( case EXTERNAL_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3678,20 +3717,21 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { __ Branch(¬_applicable, ne, scratch, Operand(from_map)); __ li(new_map_reg, Operand(to_map)); - if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) { + if (IsFastSmiElementsKind(from_kind) && IsFastObjectElementsKind(to_kind)) { __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); // Write barrier. __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, scratch, kRAHasBeenSaved, kDontSaveFPRegs); - } else if (from_kind == FAST_SMI_ONLY_ELEMENTS && - to_kind == FAST_DOUBLE_ELEMENTS) { + } else if (IsFastSmiElementsKind(from_kind) && + IsFastDoubleElementsKind(to_kind)) { Register fixed_object_reg = ToRegister(instr->temp_reg()); ASSERT(fixed_object_reg.is(a2)); ASSERT(new_map_reg.is(a3)); __ mov(fixed_object_reg, object_reg); CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(), RelocInfo::CODE_TARGET, instr); - } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) { + } else if (IsFastDoubleElementsKind(from_kind) && + IsFastObjectElementsKind(to_kind)) { Register fixed_object_reg = ToRegister(instr->temp_reg()); ASSERT(fixed_object_reg.is(a2)); ASSERT(new_map_reg.is(a3)); @@ -4446,8 +4486,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { // Deopt if the array literal boilerplate ElementsKind is of a type different // than the expected one. The check isn't necessary if the boilerplate has - // already been converted to FAST_ELEMENTS. - if (boilerplate_elements_kind != FAST_ELEMENTS) { + // already been converted to TERMINAL_FAST_ELEMENTS_KIND. + if (CanTransitionToMoreGeneralFastElementsKind( + boilerplate_elements_kind, true)) { __ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object()); // Load map into a2. __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset)); @@ -4600,10 +4641,11 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) { ElementsKind boilerplate_elements_kind = instr->hydrogen()->boilerplate()->GetElementsKind(); - // Deopt if the literal boilerplate ElementsKind is of a type different than - // the expected one. The check isn't necessary if the boilerplate has already - // been converted to FAST_ELEMENTS. 
- if (boilerplate_elements_kind != FAST_ELEMENTS) { + // Deopt if the array literal boilerplate ElementsKind is of a type different + // than the expected one. The check isn't necessary if the boilerplate has + // already been converted to TERMINAL_FAST_ELEMENTS_KIND. + if (CanTransitionToMoreGeneralFastElementsKind( + boilerplate_elements_kind, true)) { __ LoadHeapObject(a1, instr->hydrogen()->boilerplate()); // Load map into a2. __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset)); diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc index 1eb3ab7..49a462a 100644 --- a/deps/v8/src/mips/lithium-mips.cc +++ b/deps/v8/src/mips/lithium-mips.cc @@ -2023,8 +2023,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LInstruction* LChunkBuilder::DoTransitionElementsKind( HTransitionElementsKind* instr) { - if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS && - instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) { + ElementsKind from_kind = instr->original_map()->elements_kind(); + ElementsKind to_kind = instr->transitioned_map()->elements_kind(); + if (IsSimpleMapChangeTransition(from_kind, to_kind)) { LOperand* object = UseRegister(instr->object()); LOperand* new_map_reg = TempRegister(); LTransitionElementsKind* result = diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h index a04b429..f68e6ca 100644 --- a/deps/v8/src/mips/lithium-mips.h +++ b/deps/v8/src/mips/lithium-mips.h @@ -1201,6 +1201,7 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1217,13 +1218,14 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { public: - LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, - LOperand* key) { + LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, + LOperand* key) { inputs_[0] = external_pointer; inputs_[1] = key; } @@ -1237,6 +1239,7 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1705,6 +1708,7 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { LOperand* object() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1727,6 +1731,7 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } }; @@ -1771,6 +1776,7 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> { ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; diff --git 
a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc index 2c2445b..6cd5e97 100644 --- a/deps/v8/src/mips/macro-assembler-mips.cc +++ b/deps/v8/src/mips/macro-assembler-mips.cc @@ -3341,33 +3341,39 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, void MacroAssembler::CheckFastElements(Register map, Register scratch, Label* fail) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - STATIC_ASSERT(FAST_ELEMENTS == 1); + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + STATIC_ASSERT(FAST_ELEMENTS == 2); + STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset)); - Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue)); + Branch(fail, hi, scratch, + Operand(Map::kMaximumBitField2FastHoleyElementValue)); } void MacroAssembler::CheckFastObjectElements(Register map, Register scratch, Label* fail) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - STATIC_ASSERT(FAST_ELEMENTS == 1); + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + STATIC_ASSERT(FAST_ELEMENTS == 2); + STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset)); Branch(fail, ls, scratch, - Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); + Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); Branch(fail, hi, scratch, - Operand(Map::kMaximumBitField2FastElementValue)); + Operand(Map::kMaximumBitField2FastHoleyElementValue)); } -void MacroAssembler::CheckFastSmiOnlyElements(Register map, - Register scratch, - Label* fail) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); +void MacroAssembler::CheckFastSmiElements(Register map, + Register scratch, + Label* fail) { + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset)); Branch(fail, hi, scratch, - Operand(Map::kMaximumBitField2FastSmiOnlyElementValue)); + Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); } @@ -3469,22 +3475,17 @@ void MacroAssembler::CompareMapAndBranch(Register obj, lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); Operand right = Operand(map); if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) { - Map* transitioned_fast_element_map( - map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL)); - ASSERT(transitioned_fast_element_map == NULL || - map->elements_kind() != FAST_ELEMENTS); - if (transitioned_fast_element_map != NULL) { - Branch(early_success, eq, scratch, right); - right = Operand(Handle(transitioned_fast_element_map)); - } - - Map* transitioned_double_map( - map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL)); - ASSERT(transitioned_double_map == NULL || - map->elements_kind() == FAST_SMI_ONLY_ELEMENTS); - if (transitioned_double_map != NULL) { - Branch(early_success, eq, scratch, right); - right = Operand(Handle(transitioned_double_map)); + ElementsKind kind = map->elements_kind(); + if (IsFastElementsKind(kind)) { + bool packed = IsFastPackedElementsKind(kind); + Map* current_map = *map; + while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) { + kind = GetNextMoreGeneralFastElementsKind(kind, packed); + current_map = current_map->LookupElementsTransitionMap(kind, NULL); + if (!current_map) break; + Branch(early_success, eq, scratch, right); + right = Operand(Handle(current_map)); + } } } @@ -4443,27 +4444,37 @@ void MacroAssembler::LoadTransitionedArrayMapConditional( lw(scratch, 
FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset)); // Check that the function's map is the same as the expected cached map. - int expected_index = - Context::GetContextMapIndexFromElementsKind(expected_kind); - lw(at, MemOperand(scratch, Context::SlotOffset(expected_index))); - Branch(no_map_match, ne, map_in_out, Operand(at)); + lw(scratch, + MemOperand(scratch, + Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); + size_t offset = expected_kind * kPointerSize + + FixedArrayBase::kHeaderSize; + Branch(no_map_match, ne, map_in_out, Operand(scratch)); // Use the transitioned cached map. - int trans_index = - Context::GetContextMapIndexFromElementsKind(transitioned_kind); - lw(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index))); + offset = transitioned_kind * kPointerSize + + FixedArrayBase::kHeaderSize; + lw(map_in_out, FieldMemOperand(scratch, offset)); } void MacroAssembler::LoadInitialArrayMap( - Register function_in, Register scratch, Register map_out) { + Register function_in, Register scratch, + Register map_out, bool can_have_holes) { ASSERT(!function_in.is(map_out)); Label done; lw(map_out, FieldMemOperand(function_in, JSFunction::kPrototypeOrInitialMapOffset)); if (!FLAG_smi_only_arrays) { - LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, - FAST_ELEMENTS, + ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS; + LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, + kind, + map_out, + scratch, + &done); + } else if (can_have_holes) { + LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, + FAST_HOLEY_SMI_ELEMENTS, map_out, scratch, &done); diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h index f57418f..1766866 100644 --- a/deps/v8/src/mips/macro-assembler-mips.h +++ b/deps/v8/src/mips/macro-assembler-mips.h @@ -819,7 +819,8 @@ class MacroAssembler: public Assembler { // Load the initial map for new Arrays from a JSFunction. void LoadInitialArrayMap(Register function_in, Register scratch, - Register map_out); + Register map_out, + bool can_have_holes); void LoadGlobalFunction(int index, Register function); @@ -961,9 +962,9 @@ class MacroAssembler: public Assembler { // Check if a map for a JSObject indicates that the object has fast smi only // elements. Jump to the specified label if it does not. - void CheckFastSmiOnlyElements(Register map, - Register scratch, - Label* fail); + void CheckFastSmiElements(Register map, + Register scratch, + Label* fail); // Check to see if maybe_number can be stored as a double in // FastDoubleElements. If it can, store it at the index specified by key in diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc index c48bcc4..8ea390e 100644 --- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc +++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc @@ -43,44 +43,49 @@ namespace internal { #ifndef V8_INTERPRETED_REGEXP /* * This assembler uses the following register assignment convention + * - t7 : Temporarily stores the index of capture start after a matching pass + * for a global regexp. * - t1 : Pointer to current code object (Code*) including heap object tag. * - t2 : Current position in input, as negative offset from end of string. * Please notice that this is the byte offset, not the character offset! * - t3 : Currently loaded character. Must be loaded using * LoadCurrentCharacter before using any of the dispatch methods. 
- * - t4 : points to tip of backtrack stack + * - t4 : Points to tip of backtrack stack * - t5 : Unused. * - t6 : End of input (points to byte after last character in input). * - fp : Frame pointer. Used to access arguments, local variables and * RegExp registers. - * - sp : points to tip of C stack. + * - sp : Points to tip of C stack. * * The remaining registers are free for computations. * Each call to a public method should retain this convention. * * The stack will have the following structure: * - * - fp[56] direct_call (if 1, direct call from JavaScript code, + * - fp[64] Isolate* isolate (address of the current isolate) + * - fp[60] direct_call (if 1, direct call from JavaScript code, * if 0, call through the runtime system). - * - fp[52] stack_area_base (High end of the memory area to use as + * - fp[56] stack_area_base (High end of the memory area to use as * backtracking stack). + * - fp[52] capture array size (may fit multiple sets of matches) * - fp[48] int* capture_array (int[num_saved_registers_], for output). * - fp[44] secondary link/return address used by native call. * --- sp when called --- - * - fp[40] return address (lr). - * - fp[36] old frame pointer (r11). + * - fp[40] return address (lr). + * - fp[36] old frame pointer (r11). * - fp[0..32] backup of registers s0..s7. * --- frame pointer ---- - * - fp[-4] end of input (Address of end of string). - * - fp[-8] start of input (Address of first character in string). + * - fp[-4] end of input (address of end of string). + * - fp[-8] start of input (address of first character in string). * - fp[-12] start index (character index of start). * - fp[-16] void* input_string (location of a handle containing the string). - * - fp[-20] Offset of location before start of input (effectively character + * - fp[-20] success counter (only for global regexps to count matches). + * - fp[-24] Offset of location before start of input (effectively character * position -1). Used to initialize capture registers to a * non-position. - * - fp[-24] At start (if 1, we are starting at the start of the - * string, otherwise 0) - * - fp[-28] register 0 (Only positions must be stored in the first + * - fp[-28] register 0 (Only positions must be stored in the first * - register 1 num_saved_registers_ registers) * - ... * - register num_registers-1 @@ -201,8 +206,8 @@ void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) { void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) { Label not_at_start; // Did we start the match at the start of the string at all? - __ lw(a0, MemOperand(frame_pointer(), kAtStart)); - BranchOrBacktrack(&not_at_start, eq, a0, Operand(zero_reg)); + __ lw(a0, MemOperand(frame_pointer(), kStartIndex)); + BranchOrBacktrack(&not_at_start, ne, a0, Operand(zero_reg)); // If we did, are we still at the start of the input? __ lw(a1, MemOperand(frame_pointer(), kInputStart)); @@ -214,8 +219,8 @@ void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) { void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) { // Did we start the match at the start of the string at all? - __ lw(a0, MemOperand(frame_pointer(), kAtStart)); - BranchOrBacktrack(on_not_at_start, eq, a0, Operand(zero_reg)); + __ lw(a0, MemOperand(frame_pointer(), kStartIndex)); + BranchOrBacktrack(on_not_at_start, ne, a0, Operand(zero_reg)); // If we did, are we still at the start of the input?
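As a cross-check of the layout comment above against the constants in the regexp-macro-assembler-mips.h hunk further down: with kPointerSize == 4, the locals land at the following fp-relative offsets (derived arithmetic, not patch code):

    // fp-relative offsets of the locals after this patch (MIPS32).
    static const int kPointerSize        = 4;
    static const int kInputEnd           = -1 * kPointerSize;  // fp[-4]
    static const int kInputStart         = -2 * kPointerSize;  // fp[-8]
    static const int kStartIndex         = -3 * kPointerSize;  // fp[-12]
    static const int kInputString        = -4 * kPointerSize;  // fp[-16]
    static const int kSuccessfulCaptures = -5 * kPointerSize;  // fp[-20] new
    static const int kInputStartMinusOne = -6 * kPointerSize;  // fp[-24]
    static const int kRegisterZero       = -7 * kPointerSize;  // fp[-28]

There is no "at start" slot anymore; as the two hunks above show, CheckAtStart and CheckNotAtStart recompute the condition from kStartIndex and the current input offset.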
__ lw(a1, MemOperand(frame_pointer(), kInputStart)); __ Addu(a0, end_of_input_address(), Operand(current_input_offset())); @@ -640,6 +645,7 @@ void RegExpMacroAssemblerMIPS::Fail() { Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) { + Label return_v0; if (masm_->has_exception()) { // If the code gets corrupted due to long regular expressions and lack of // space on trampolines, an internal exception flag is set. If this case @@ -669,8 +675,9 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) { // Set frame pointer in space for it if this is not a direct call // from generated code. __ Addu(frame_pointer(), sp, Operand(4 * kPointerSize)); + __ mov(a0, zero_reg); + __ push(a0); // Make room for success counter and initialize it to 0. __ push(a0); // Make room for "position - 1" constant (value irrelevant). - __ push(a0); // Make room for "at start" constant (value irrelevant). // Check if we have space on the stack for registers. Label stack_limit_hit; @@ -689,12 +696,12 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) { // Exit with OutOfMemory exception. There is not enough space on the stack // for our working registers. __ li(v0, Operand(EXCEPTION)); - __ jmp(&exit_label_); + __ jmp(&return_v0); __ bind(&stack_limit_hit); CallCheckStackGuardState(a0); // If returned value is non-zero, we exit with the returned value as result. - __ Branch(&exit_label_, ne, v0, Operand(zero_reg)); + __ Branch(&return_v0, ne, v0, Operand(zero_reg)); __ bind(&stack_ok); // Allocate space on stack for registers. @@ -715,39 +722,44 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) { // position registers. __ sw(a0, MemOperand(frame_pointer(), kInputStartMinusOne)); - // Determine whether the start index is zero, that is at the start of the - // string, and store that value in a local variable. - __ mov(t5, a1); - __ li(a1, Operand(1)); - __ Movn(a1, zero_reg, t5); - __ sw(a1, MemOperand(frame_pointer(), kAtStart)); + // Initialize code pointer register + __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); + + Label load_char_start_regexp, start_regexp; + // Load newline if index is at start, previous character otherwise. + __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg)); + __ li(current_character(), Operand('\n')); + __ jmp(&start_regexp); + + // Global regexp restarts matching here. + __ bind(&load_char_start_regexp); + // Load previous char as initial value of current character register. + LoadCurrentCharacterUnchecked(-1, 1); + __ bind(&start_regexp); + + // Initialize on-stack registers. if (num_saved_registers_ > 0) { // Always is, if generated from a regexp. // Fill saved registers with initial value = start offset - 1. - - // Address of register 0. - __ Addu(a1, frame_pointer(), Operand(kRegisterZero)); - __ li(a2, Operand(num_saved_registers_)); - Label init_loop; - __ bind(&init_loop); - __ sw(a0, MemOperand(a1)); - __ Addu(a1, a1, Operand(-kPointerSize)); - __ Subu(a2, a2, Operand(1)); - __ Branch(&init_loop, ne, a2, Operand(zero_reg)); + if (num_saved_registers_ > 8) { + // Address of register 0. + __ Addu(a1, frame_pointer(), Operand(kRegisterZero)); + __ li(a2, Operand(num_saved_registers_)); + Label init_loop; + __ bind(&init_loop); + __ sw(a0, MemOperand(a1)); + __ Addu(a1, a1, Operand(-kPointerSize)); + __ Subu(a2, a2, Operand(1)); + __ Branch(&init_loop, ne, a2, Operand(zero_reg)); + } else { + for (int i = 0; i < num_saved_registers_; i++) { + __ sw(a0, register_location(i)); + } + } } // Initialize backtrack stack pointer.
__ lw(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd)); - // Initialize code pointer register - __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); - // Load previous char as initial value of current character register. - Label at_start; - __ lw(a0, MemOperand(frame_pointer(), kAtStart)); - __ Branch(&at_start, ne, a0, Operand(zero_reg)); - LoadCurrentCharacterUnchecked(-1, 1); // Load previous char. - __ jmp(&start_label_); - __ bind(&at_start); - __ li(current_character(), Operand('\n')); + __ jmp(&start_label_); @@ -776,6 +788,10 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) { for (int i = 0; i < num_saved_registers_; i += 2) { __ lw(a2, register_location(i)); __ lw(a3, register_location(i + 1)); + if (global()) { + // Keep capture start in t7 for the zero-length check later. + __ mov(t7, a2); + } if (mode_ == UC16) { __ sra(a2, a2, 1); __ Addu(a2, a2, a1); @@ -791,10 +807,52 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) { __ Addu(a0, a0, kPointerSize); } } - __ li(v0, Operand(SUCCESS)); + + if (global()) { + // Restart matching if the regular expression is flagged as global. + __ lw(a0, MemOperand(frame_pointer(), kSuccessfulCaptures)); + __ lw(a1, MemOperand(frame_pointer(), kNumOutputRegisters)); + __ lw(a2, MemOperand(frame_pointer(), kRegisterOutput)); + // Increment success counter. + __ Addu(a0, a0, 1); + __ sw(a0, MemOperand(frame_pointer(), kSuccessfulCaptures)); + // Capture results have been stored, so the number of remaining global + // output registers is reduced by the number of stored captures. + __ Subu(a1, a1, num_saved_registers_); + // Check whether we have enough room for another set of capture results. + __ mov(v0, a0); + __ Branch(&return_v0, lt, a1, Operand(num_saved_registers_)); + + __ sw(a1, MemOperand(frame_pointer(), kNumOutputRegisters)); + // Advance the location for output. + __ Addu(a2, a2, num_saved_registers_ * kPointerSize); + __ sw(a2, MemOperand(frame_pointer(), kRegisterOutput)); + + // Prepare a0 to initialize registers with its value in the next run. + __ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne)); + // Special case for zero-length matches. + // t7: capture start index + // Not a zero-length match, restart. + __ Branch( + &load_char_start_regexp, ne, current_input_offset(), Operand(t7)); + // Offset from the end is zero if we already reached the end. + __ Branch(&exit_label_, eq, current_input_offset(), Operand(zero_reg)); + // Advance current position after a zero-length match. + __ Addu(current_input_offset(), + current_input_offset(), + Operand((mode_ == UC16) ? 2 : 1)); + __ Branch(&load_char_start_regexp); + } else { + __ li(v0, Operand(SUCCESS)); + } } // Exit and return v0. __ bind(&exit_label_); + if (global()) { + __ lw(v0, MemOperand(frame_pointer(), kSuccessfulCaptures)); + } + + __ bind(&return_v0); // Skip sp past regexp registers and local variables. __ mov(sp, frame_pointer()); // Restore registers s0..s7 and return (restoring ra to pc). @@ -820,7 +878,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) { __ MultiPop(regexp_registers_to_retain); // If returning non-zero, we should end execution with the given // result as return value. - __ Branch(&exit_label_, ne, v0, Operand(zero_reg)); + __ Branch(&return_v0, ne, v0, Operand(zero_reg)); // String might have moved: Reload end of string from frame.
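Taken together, the success path above gives global regexps a restart loop: bump the success counter, retire one set of output registers, resume at the match end, and force progress after a zero-length match. A hypothetical C++ model of that protocol follows; the names are invented and the authoritative version is the assembly above:

    // Model of the global-regexp restart loop; not part of the patch.
    #include <functional>

    // One match attempt: returns true on success and fills
    // registers_per_match capture slots (character positions).
    typedef std::function<bool(int pos, int* captures)> MatchOnce;

    int RunGlobal(const MatchOnce& match_once, int* output,
                  int output_register_count, int registers_per_match,
                  int subject_length) {
      int succeeded = 0;                        // kSuccessfulCaptures
      int pos = 0;
      while (output_register_count - succeeded * registers_per_match >=
             registers_per_match) {             // room for another set?
        int* captures = output + succeeded * registers_per_match;
        if (!match_once(pos, captures)) break;  // no further match
        ++succeeded;
        int next = captures[1];                 // end of this match
        if (next == captures[0]) {              // zero-length match
          if (next == subject_length) break;    // already at end of input
          ++next;                               // skip one character (the
        }                                       // assembly adds 2 in UC16)
        pos = next;
      }
      return succeeded;  // the count, not SUCCESS; see Succeed() below
    }

This is also why Succeed() now returns a bool further down: for a global regexp the generated code must not exit on the first successful match.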
__ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); @@ -864,7 +922,7 @@ Handle RegExpMacroAssemblerMIPS::GetCode(Handle source) { __ bind(&exit_with_exception); // Exit with Result EXCEPTION(-1) to signal thrown exception. __ li(v0, Operand(EXCEPTION)); - __ jmp(&exit_label_); + __ jmp(&return_v0); } } @@ -1012,8 +1070,9 @@ void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) { } -void RegExpMacroAssemblerMIPS::Succeed() { +bool RegExpMacroAssemblerMIPS::Succeed() { __ jmp(&success_label_); + return global(); } @@ -1280,8 +1339,9 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset, int characters) { Register offset = current_input_offset(); if (cp_offset != 0) { - __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size())); - offset = a0; + // t7 is not being used to store the capture start index at this point. + __ Addu(t7, current_input_offset(), Operand(cp_offset * char_size())); + offset = t7; } // We assume that we cannot do unaligned loads on MIPS, so this function // must only be used to load a single character at a time. diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.h b/deps/v8/src/mips/regexp-macro-assembler-mips.h index d167f62..562d3fc 100644 --- a/deps/v8/src/mips/regexp-macro-assembler-mips.h +++ b/deps/v8/src/mips/regexp-macro-assembler-mips.h @@ -115,7 +115,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler { virtual void ReadStackPointerFromRegister(int reg); virtual void SetCurrentPositionFromEnd(int by); virtual void SetRegister(int register_index, int to); - virtual void Succeed(); + virtual bool Succeed(); virtual void WriteCurrentPositionToRegister(int reg, int cp_offset); virtual void ClearRegisters(int reg_from, int reg_to); virtual void WriteStackPointerToRegister(int reg); @@ -141,7 +141,8 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler { static const int kStackFrameHeader = kReturnAddress + kPointerSize; // Stack parameters placed by caller. static const int kRegisterOutput = kStackFrameHeader + 20; - static const int kStackHighEnd = kRegisterOutput + kPointerSize; + static const int kNumOutputRegisters = kRegisterOutput + kPointerSize; + static const int kStackHighEnd = kNumOutputRegisters + kPointerSize; static const int kDirectCall = kStackHighEnd + kPointerSize; static const int kIsolate = kDirectCall + kPointerSize; @@ -153,10 +154,10 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler { static const int kInputString = kStartIndex - kPointerSize; // When adding local variables remember to push space for them in // the frame in GetCode. - static const int kInputStartMinusOne = kInputString - kPointerSize; - static const int kAtStart = kInputStartMinusOne - kPointerSize; + static const int kSuccessfulCaptures = kInputString - kPointerSize; + static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize; // First register address. Following registers are below it on the stack. - static const int kRegisterZero = kAtStart - kPointerSize; + static const int kRegisterZero = kInputStartMinusOne - kPointerSize; // Initial size of code buffer. 
static const size_t kRegExpCodeSize = 1024; diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h index 1e72939..776badc 100644 --- a/deps/v8/src/mips/simulator-mips.h +++ b/deps/v8/src/mips/simulator-mips.h @@ -50,16 +50,16 @@ namespace internal { entry(p0, p1, p2, p3, p4) typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*, - void*, int*, Address, int, Isolate*); + void*, int*, int, Address, int, Isolate*); // Call the generated regexp code directly. The code at the entry address // should act as a function matching the type arm_regexp_matcher. // The fifth argument is a dummy that reserves the space used for // the return address added by the ExitFrame in native calls. -#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \ +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \ (FUNCTION_CAST<mips_regexp_matcher>(entry)( \ - p0, p1, p2, p3, NULL, p4, p5, p6, p7)) + p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)) #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ reinterpret_cast<TryCatch*>(try_catch_address) @@ -403,9 +403,9 @@ class Simulator { reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \ FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4)) -#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \ +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \ Simulator::current(Isolate::Current())->Call( \ - entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7) + entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8) #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ try_catch_address == NULL ? \ diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc index 18a5f5f..f8cf970 100644 --- a/deps/v8/src/mips/stub-cache-mips.cc +++ b/deps/v8/src/mips/stub-cache-mips.cc @@ -1585,16 +1585,29 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ jmp(&fast_object); // In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(¬_fast_object); - __ CheckFastSmiOnlyElements(a3, t3, &call_builtin); + __ CheckFastSmiElements(a3, t3, &call_builtin); // edx: receiver // r3: map - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + Label try_holey_map; + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, a3, t3, + &try_holey_map); + __ mov(a2, receiver); + ElementsTransitionGenerator:: + GenerateMapChangeElementsTransition(masm()); + __ jmp(&fast_object); + + __ bind(&try_holey_map); + __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS, + FAST_HOLEY_ELEMENTS, + a3, + t3, &call_builtin); __ mov(a2, receiver); - ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm()); + ElementsTransitionGenerator:: + GenerateMapChangeElementsTransition(masm()); __ bind(&fast_object); } else { __ CheckFastObjectElements(a3, a3, &call_builtin); @@ -3372,9 +3385,12 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) { case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3508,8 +3524,11 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( } break; case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3869,8 +3888,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( } break; case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3934,8 +3956,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -4106,8 +4131,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -4286,7 +4314,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // Check that the key is a smi or a heap number convertible to a smi. 
GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic); - if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + if (IsFastSmiElementsKind(elements_kind)) { __ JumpIfNotSmi(value_reg, &transition_elements_kind); } @@ -4314,7 +4342,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ bind(&finish_store); - if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + if (IsFastSmiElementsKind(elements_kind)) { __ Addu(scratch, elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); @@ -4323,7 +4351,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ Addu(scratch, scratch, scratch2); __ sw(value_reg, MemOperand(scratch)); } else { - ASSERT(elements_kind == FAST_ELEMENTS); + ASSERT(IsFastObjectElementsKind(elements_kind)); __ Addu(scratch, elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); @@ -4332,7 +4360,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ Addu(scratch, scratch, scratch2); __ sw(value_reg, MemOperand(scratch)); __ mov(receiver_reg, value_reg); - ASSERT(elements_kind == FAST_ELEMENTS); __ RecordWrite(elements_reg, // Object. scratch, // Address. receiver_reg, // Value. diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc index 3bfb74d..11a3df9 100644 --- a/deps/v8/src/objects-debug.cc +++ b/deps/v8/src/objects-debug.cc @@ -286,12 +286,11 @@ void JSObject::JSObjectVerify() { (map()->inobject_properties() + properties()->length() - map()->NextFreePropertyIndex())); } - ASSERT_EQ((map()->has_fast_elements() || - map()->has_fast_smi_only_elements() || + ASSERT_EQ((map()->has_fast_smi_or_object_elements() || (elements() == GetHeap()->empty_fixed_array())), (elements()->map() == GetHeap()->fixed_array_map() || elements()->map() == GetHeap()->fixed_cow_array_map())); - ASSERT(map()->has_fast_elements() == HasFastElements()); + ASSERT(map()->has_fast_object_elements() == HasFastObjectElements()); } @@ -458,10 +457,17 @@ void String::StringVerify() { ConsString::cast(this)->ConsStringVerify(); } else if (IsSlicedString()) { SlicedString::cast(this)->SlicedStringVerify(); + } else if (IsSeqAsciiString()) { + SeqAsciiString::cast(this)->SeqAsciiStringVerify(); } } +void SeqAsciiString::SeqAsciiStringVerify() { + CHECK(String::IsAscii(GetChars(), length())); +} + + void ConsString::ConsStringVerify() { CHECK(this->first()->IsString()); CHECK(this->second() == GetHeap()->empty_string() || @@ -510,7 +516,7 @@ void JSGlobalProxy::JSGlobalProxyVerify() { VerifyObjectField(JSGlobalProxy::kContextOffset); // Make sure that this object has no properties, elements. 
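The case-list and predicate changes in the stub and verifier hunks all trace back to the new ElementsKind enumeration in elements-kind.h earlier in this patch; the STATIC_ASSERTs in the macro-assembler hunk above pin the first four values. A condensed restatement, with simplified predicate bodies rather than the header's exact code:

    // Condensed from elements-kind.h as added by this patch.
    enum ElementsKind {
      FAST_SMI_ELEMENTS = 0,         // only SMIs, no holes
      FAST_HOLEY_SMI_ELEMENTS = 1,   // only SMIs, may contain holes
      FAST_ELEMENTS = 2,             // any object, no holes
      FAST_HOLEY_ELEMENTS = 3,       // any object, may contain holes
      FAST_DOUBLE_ELEMENTS = 4,      // unboxed doubles, no holes
      FAST_HOLEY_DOUBLE_ELEMENTS = 5
      // ... DICTIONARY_ELEMENTS, NON_STRICT_ARGUMENTS_ELEMENTS, externals
    };

    inline bool IsFastSmiElementsKind(ElementsKind k) {
      return k == FAST_SMI_ELEMENTS || k == FAST_HOLEY_SMI_ELEMENTS;
    }
    inline bool IsFastObjectElementsKind(ElementsKind k) {
      return k == FAST_ELEMENTS || k == FAST_HOLEY_ELEMENTS;
    }
    inline bool IsFastDoubleElementsKind(ElementsKind k) {
      return k == FAST_DOUBLE_ELEMENTS || k == FAST_HOLEY_DOUBLE_ELEMENTS;
    }
    inline bool IsFastHoleyElementsKind(ElementsKind k) {
      return k == FAST_HOLEY_SMI_ELEMENTS || k == FAST_HOLEY_ELEMENTS ||
             k == FAST_HOLEY_DOUBLE_ELEMENTS;  // the odd-numbered kinds
    }

Note that the transition order used elsewhere in the patch is not enum order: GetNextMoreGeneralFastElementsKind walks SMI -> HOLEY_SMI -> DOUBLE -> HOLEY_DOUBLE -> OBJECT -> HOLEY_OBJECT, with FAST_HOLEY_ELEMENTS as the terminal kind (the TERMINAL_FAST_ELEMENTS_KIND mentioned in the lithium hunks above).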
CHECK_EQ(0, properties()->length()); - CHECK(HasFastElements()); + CHECK(HasFastObjectElements()); CHECK_EQ(0, FixedArray::cast(elements())->length()); } @@ -805,6 +811,11 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) { } // Indexed properties switch (GetElementsKind()) { + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: case FAST_ELEMENTS: { info->number_of_objects_with_fast_elements_++; int holes = 0; @@ -818,6 +829,14 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) { info->number_of_fast_unused_elements_ += holes; break; } + case EXTERNAL_BYTE_ELEMENTS: + case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + case EXTERNAL_SHORT_ELEMENTS: + case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + case EXTERNAL_INT_ELEMENTS: + case EXTERNAL_UNSIGNED_INT_ELEMENTS: + case EXTERNAL_FLOAT_ELEMENTS: + case EXTERNAL_DOUBLE_ELEMENTS: case EXTERNAL_PIXEL_ELEMENTS: { info->number_of_objects_with_fast_elements_++; ExternalPixelArray* e = ExternalPixelArray::cast(elements()); @@ -831,8 +850,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) { dict->Capacity() - dict->NumberOfElements(); break; } - default: - UNREACHABLE(); + case NON_STRICT_ARGUMENTS_ELEMENTS: break; } } @@ -992,6 +1010,28 @@ void NormalizedMapCache::NormalizedMapCacheVerify() { } +void Map::ZapInstanceDescriptors() { + DescriptorArray* descriptors = instance_descriptors(); + if (descriptors == GetHeap()->empty_descriptor_array()) return; + FixedArray* contents = FixedArray::cast( + descriptors->get(DescriptorArray::kContentArrayIndex)); + MemsetPointer(descriptors->data_start(), + GetHeap()->the_hole_value(), + descriptors->length()); + MemsetPointer(contents->data_start(), + GetHeap()->the_hole_value(), + contents->length()); +} + + +void Map::ZapPrototypeTransitions() { + FixedArray* proto_transitions = prototype_transitions(); + MemsetPointer(proto_transitions->data_start(), + GetHeap()->the_hole_value(), + proto_transitions->length()); +} + + #endif // DEBUG } } // namespace v8::internal diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h index eb1586a..4afbe3e 100644 --- a/deps/v8/src/objects-inl.h +++ b/deps/v8/src/objects-inl.h @@ -128,18 +128,6 @@ PropertyDetails PropertyDetails::AsDeleted() { } -bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind, - ElementsKind to_kind) { - if (to_kind == FAST_ELEMENTS) { - return from_kind == FAST_SMI_ONLY_ELEMENTS || - from_kind == FAST_DOUBLE_ELEMENTS; - } else { - return to_kind == FAST_DOUBLE_ELEMENTS && - from_kind == FAST_SMI_ONLY_ELEMENTS; - } -} - - bool Object::IsFixedArrayBase() { return IsFixedArray() || IsFixedDoubleArray(); } @@ -1244,35 +1232,26 @@ FixedArrayBase* JSObject::elements() { return static_cast(array); } -void JSObject::ValidateSmiOnlyElements() { + +void JSObject::ValidateElements() { #if DEBUG - if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) { - Heap* heap = GetHeap(); - // Don't use elements, since integrity checks will fail if there - // are filler pointers in the array. - FixedArray* fixed_array = - reinterpret_cast(READ_FIELD(this, kElementsOffset)); - Map* map = fixed_array->map(); - // Arrays that have been shifted in place can't be verified. 
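The EnsureCanContainElements hunk just below now infers the weakest fast kind that can hold a batch of incoming values: any hole forces the holey variant, heap numbers widen smi kinds to double, and any other non-smi widens to the object kind, while the scan continues so that a later hole is still observed. In outline, reusing the enum and predicates sketched above (ValueClass stands in for the tagged values the real code inspects):

    // Outline of the target-kind inference; not the patch's code.
    enum ValueClass { kSmi, kTheHole, kHeapNumber, kOtherObject };

    ElementsKind InferTargetKind(const ValueClass* values, int count,
                                 ElementsKind kind, bool allow_doubles) {
      bool holey = IsFastHoleyElementsKind(kind);
      for (int i = 0; i < count; ++i) {
        switch (values[i]) {
          case kSmi:
            break;                          // fits every fast kind
          case kTheHole:
            holey = true;                   // forces the holey variant
            break;
          case kHeapNumber:
            if (allow_doubles && IsFastSmiElementsKind(kind)) {
              kind = FAST_DOUBLE_ELEMENTS;  // smi kinds widen to double
            }
            break;                          // numbers never force objects
          case kOtherObject:
            if (holey) return FAST_HOLEY_ELEMENTS;  // nothing wider exists
            kind = FAST_ELEMENTS;           // keep scanning for holes
            break;
        }
      }
      // With the enum above, a packed kind's holey twin is kind | 1.
      return holey ? static_cast<ElementsKind>(kind | 1) : kind;
    }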
- if (map != heap->raw_unchecked_one_pointer_filler_map() && - map != heap->raw_unchecked_two_pointer_filler_map() && - map != heap->free_space_map()) { - for (int i = 0; i < fixed_array->length(); i++) { - Object* current = fixed_array->get(i); - ASSERT(current->IsSmi() || current->IsTheHole()); - } - } + if (FLAG_enable_slow_asserts) { + ElementsAccessor* accessor = GetElementsAccessor(); + accessor->Validate(this); } #endif } MaybeObject* JSObject::EnsureCanContainHeapObjectElements() { -#if DEBUG - ValidateSmiOnlyElements(); -#endif - if ((map()->elements_kind() != FAST_ELEMENTS)) { - return TransitionElementsKind(FAST_ELEMENTS); + ValidateElements(); + ElementsKind elements_kind = map()->elements_kind(); + if (!IsFastObjectElementsKind(elements_kind)) { + if (IsFastHoleyElementsKind(elements_kind)) { + return TransitionElementsKind(FAST_HOLEY_ELEMENTS); + } else { + return TransitionElementsKind(FAST_ELEMENTS); + } } return this; } @@ -1284,20 +1263,34 @@ MaybeObject* JSObject::EnsureCanContainElements(Object** objects, ElementsKind current_kind = map()->elements_kind(); ElementsKind target_kind = current_kind; ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS); - if (current_kind == FAST_ELEMENTS) return this; - + bool is_holey = IsFastHoleyElementsKind(current_kind); + if (current_kind == FAST_HOLEY_ELEMENTS) return this; Heap* heap = GetHeap(); Object* the_hole = heap->the_hole_value(); Object* heap_number_map = heap->heap_number_map(); for (uint32_t i = 0; i < count; ++i) { Object* current = *objects++; - if (!current->IsSmi() && current != the_hole) { + if (current == the_hole) { + is_holey = true; + target_kind = GetHoleyElementsKind(target_kind); + } else if (!current->IsSmi()) { if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && - HeapObject::cast(current)->map() == heap_number_map) { - target_kind = FAST_DOUBLE_ELEMENTS; + HeapObject::cast(current)->map() == heap_number_map && + IsFastSmiElementsKind(target_kind)) { + if (is_holey) { + target_kind = FAST_HOLEY_DOUBLE_ELEMENTS; + } else { + target_kind = FAST_DOUBLE_ELEMENTS; + } } else { - target_kind = FAST_ELEMENTS; - break; + if (!current->IsNumber()) { + if (is_holey) { + target_kind = FAST_HOLEY_ELEMENTS; + break; + } else { + target_kind = FAST_ELEMENTS; + } + } } } } @@ -1310,6 +1303,7 @@ MaybeObject* JSObject::EnsureCanContainElements(Object** objects, MaybeObject* JSObject::EnsureCanContainElements(FixedArrayBase* elements, + uint32_t length, EnsureElementsMode mode) { if (elements->map() != GetHeap()->fixed_double_array_map()) { ASSERT(elements->map() == GetHeap()->fixed_array_map() || @@ -1318,11 +1312,19 @@ MaybeObject* JSObject::EnsureCanContainElements(FixedArrayBase* elements, mode = DONT_ALLOW_DOUBLE_ELEMENTS; } Object** objects = FixedArray::cast(elements)->GetFirstElementAddress(); - return EnsureCanContainElements(objects, elements->length(), mode); + return EnsureCanContainElements(objects, length, mode); } ASSERT(mode == ALLOW_COPIED_DOUBLE_ELEMENTS); - if (GetElementsKind() == FAST_SMI_ONLY_ELEMENTS) { + if (GetElementsKind() == FAST_HOLEY_SMI_ELEMENTS) { + return TransitionElementsKind(FAST_HOLEY_DOUBLE_ELEMENTS); + } else if (GetElementsKind() == FAST_SMI_ELEMENTS) { + FixedDoubleArray* double_array = FixedDoubleArray::cast(elements); + for (uint32_t i = 0; i < length; ++i) { + if (double_array->is_the_hole(i)) { + return TransitionElementsKind(FAST_HOLEY_DOUBLE_ELEMENTS); + } + } return TransitionElementsKind(FAST_DOUBLE_ELEMENTS); } @@ -1334,21 +1336,20 @@ MaybeObject* 
JSObject::GetElementsTransitionMap(Isolate* isolate, ElementsKind to_kind) { Map* current_map = map(); ElementsKind from_kind = current_map->elements_kind(); - if (from_kind == to_kind) return current_map; Context* global_context = isolate->context()->global_context(); - if (current_map == global_context->smi_js_array_map()) { - if (to_kind == FAST_ELEMENTS) { - return global_context->object_js_array_map(); - } else { - if (to_kind == FAST_DOUBLE_ELEMENTS) { - return global_context->double_js_array_map(); - } else { - ASSERT(to_kind == DICTIONARY_ELEMENTS); + Object* maybe_array_maps = global_context->js_array_maps(); + if (maybe_array_maps->IsFixedArray()) { + FixedArray* array_maps = FixedArray::cast(maybe_array_maps); + if (array_maps->get(from_kind) == current_map) { + Object* maybe_transitioned_map = array_maps->get(to_kind); + if (maybe_transitioned_map->IsMap()) { + return Map::cast(maybe_transitioned_map); } } } + return GetElementsTransitionMapSlow(to_kind); } @@ -1357,9 +1358,6 @@ void JSObject::set_map_and_elements(Map* new_map, FixedArrayBase* value, WriteBarrierMode mode) { ASSERT(value->HasValidElements()); -#ifdef DEBUG - ValidateSmiOnlyElements(); -#endif if (new_map != NULL) { if (mode == UPDATE_WRITE_BARRIER) { set_map(new_map); @@ -1368,8 +1366,7 @@ void JSObject::set_map_and_elements(Map* new_map, set_map_no_write_barrier(new_map); } } - ASSERT((map()->has_fast_elements() || - map()->has_fast_smi_only_elements() || + ASSERT((map()->has_fast_smi_or_object_elements() || (value == GetHeap()->empty_fixed_array())) == (value->map() == GetHeap()->fixed_array_map() || value->map() == GetHeap()->fixed_cow_array_map())); @@ -1392,8 +1389,7 @@ void JSObject::initialize_properties() { void JSObject::initialize_elements() { - ASSERT(map()->has_fast_elements() || - map()->has_fast_smi_only_elements() || + ASSERT(map()->has_fast_smi_or_object_elements() || map()->has_fast_double_elements()); ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array())); WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array()); @@ -1402,9 +1398,10 @@ void JSObject::initialize_elements() { MaybeObject* JSObject::ResetElements() { Object* obj; - ElementsKind elements_kind = FLAG_smi_only_arrays - ? 
FAST_SMI_ONLY_ELEMENTS - : FAST_ELEMENTS; + ElementsKind elements_kind = GetInitialFastElementsKind(); + if (!FLAG_smi_only_arrays) { + elements_kind = FastSmiToObjectElementsKind(elements_kind); + } MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(), elements_kind); if (!maybe_obj->ToObject(&obj)) return maybe_obj; @@ -1676,6 +1673,11 @@ Object* FixedArray::get(int index) { } +bool FixedArray::is_the_hole(int index) { + return get(index) == GetHeap()->the_hole_value(); +} + + void FixedArray::set(int index, Smi* value) { ASSERT(map() != HEAP->fixed_cow_array_map()); ASSERT(index >= 0 && index < this->length()); @@ -2857,15 +2859,15 @@ bool Map::has_non_instance_prototype() { void Map::set_function_with_prototype(bool value) { if (value) { - set_bit_field2(bit_field2() | (1 << kFunctionWithPrototype)); + set_bit_field3(bit_field3() | (1 << kFunctionWithPrototype)); } else { - set_bit_field2(bit_field2() & ~(1 << kFunctionWithPrototype)); + set_bit_field3(bit_field3() & ~(1 << kFunctionWithPrototype)); } } bool Map::function_with_prototype() { - return ((1 << kFunctionWithPrototype) & bit_field2()) != 0; + return ((1 << kFunctionWithPrototype) & bit_field3()) != 0; } @@ -3351,6 +3353,9 @@ void Map::clear_instance_descriptors() { Object* object = READ_FIELD(this, kInstanceDescriptorsOrBitField3Offset); if (!object->IsSmi()) { +#ifdef DEBUG + ZapInstanceDescriptors(); +#endif WRITE_FIELD( this, kInstanceDescriptorsOrBitField3Offset, @@ -3376,6 +3381,11 @@ void Map::set_instance_descriptors(DescriptorArray* value, } } ASSERT(!is_shared()); +#ifdef DEBUG + if (value != instance_descriptors()) { + ZapInstanceDescriptors(); + } +#endif WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, value); CONDITIONAL_WRITE_BARRIER( heap, this, kInstanceDescriptorsOrBitField3Offset, value, mode); @@ -3448,6 +3458,11 @@ void Map::set_prototype_transitions(FixedArray* value, WriteBarrierMode mode) { Heap* heap = GetHeap(); ASSERT(value != heap->empty_fixed_array()); value->set(kProtoTransitionBackPointerOffset, GetBackPointer()); +#ifdef DEBUG + if (value != prototype_transitions()) { + ZapPrototypeTransitions(); + } +#endif WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, value); CONDITIONAL_WRITE_BARRIER( heap, this, kPrototypeTransitionsOrBackPointerOffset, value, mode); @@ -3995,27 +4010,32 @@ MaybeObject* JSFunction::set_initial_map_and_cache_transitions( global_context->get(Context::ARRAY_FUNCTION_INDEX); if (array_function->IsJSFunction() && this == JSFunction::cast(array_function)) { - ASSERT(initial_map->elements_kind() == FAST_SMI_ONLY_ELEMENTS); - - MaybeObject* maybe_map = initial_map->CopyDropTransitions(); - Map* new_double_map = NULL; - if (!maybe_map->To(&new_double_map)) return maybe_map; - new_double_map->set_elements_kind(FAST_DOUBLE_ELEMENTS); - maybe_map = initial_map->AddElementsTransition(FAST_DOUBLE_ELEMENTS, - new_double_map); - if (maybe_map->IsFailure()) return maybe_map; - - maybe_map = new_double_map->CopyDropTransitions(); - Map* new_object_map = NULL; - if (!maybe_map->To(&new_object_map)) return maybe_map; - new_object_map->set_elements_kind(FAST_ELEMENTS); - maybe_map = new_double_map->AddElementsTransition(FAST_ELEMENTS, - new_object_map); - if (maybe_map->IsFailure()) return maybe_map; - - global_context->set_smi_js_array_map(initial_map); - global_context->set_double_js_array_map(new_double_map); - global_context->set_object_js_array_map(new_object_map); + // Replace all of the cached initial array maps in the global context with + // the 
appropriate transitioned elements kind maps. + Heap* heap = GetHeap(); + MaybeObject* maybe_maps = + heap->AllocateFixedArrayWithHoles(kElementsKindCount); + FixedArray* maps; + if (!maybe_maps->To(&maps)) return maybe_maps; + + Map* current_map = initial_map; + ElementsKind kind = current_map->elements_kind(); + ASSERT(kind == GetInitialFastElementsKind()); + maps->set(kind, current_map); + for (int i = GetSequenceIndexFromFastElementsKind(kind) + 1; + i < kFastElementsKindCount; ++i) { + ElementsKind transitioned_kind = GetFastElementsKindFromSequenceIndex(i); + MaybeObject* maybe_new_map = current_map->CopyDropTransitions(); + Map* new_map = NULL; + if (!maybe_new_map->To(&new_map)) return maybe_new_map; + new_map->set_elements_kind(transitioned_kind); + maybe_new_map = current_map->AddElementsTransition(transitioned_kind, + new_map); + if (maybe_new_map->IsFailure()) return maybe_new_map; + maps->set(transitioned_kind, new_map); + current_map = new_map; + } + global_context->set_js_array_maps(maps); } set_initial_map(initial_map); return this; @@ -4351,18 +4371,18 @@ ElementsKind JSObject::GetElementsKind() { FixedArrayBase* fixed_array = reinterpret_cast(READ_FIELD(this, kElementsOffset)); Map* map = fixed_array->map(); - ASSERT(((kind == FAST_ELEMENTS || kind == FAST_SMI_ONLY_ELEMENTS) && - (map == GetHeap()->fixed_array_map() || - map == GetHeap()->fixed_cow_array_map())) || - (kind == FAST_DOUBLE_ELEMENTS && - (fixed_array->IsFixedDoubleArray() || - fixed_array == GetHeap()->empty_fixed_array())) || - (kind == DICTIONARY_ELEMENTS && + ASSERT((IsFastSmiOrObjectElementsKind(kind) && + (map == GetHeap()->fixed_array_map() || + map == GetHeap()->fixed_cow_array_map())) || + (IsFastDoubleElementsKind(kind) && + (fixed_array->IsFixedDoubleArray() || + fixed_array == GetHeap()->empty_fixed_array())) || + (kind == DICTIONARY_ELEMENTS && fixed_array->IsFixedArray() && - fixed_array->IsDictionary()) || - (kind > DICTIONARY_ELEMENTS)); - ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) || - (elements()->IsFixedArray() && elements()->length() >= 2)); + fixed_array->IsDictionary()) || + (kind > DICTIONARY_ELEMENTS)); + ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) || + (elements()->IsFixedArray() && elements()->length() >= 2)); #endif return kind; } @@ -4373,25 +4393,28 @@ ElementsAccessor* JSObject::GetElementsAccessor() { } -bool JSObject::HasFastElements() { - return GetElementsKind() == FAST_ELEMENTS; +bool JSObject::HasFastObjectElements() { + return IsFastObjectElementsKind(GetElementsKind()); } -bool JSObject::HasFastSmiOnlyElements() { - return GetElementsKind() == FAST_SMI_ONLY_ELEMENTS; +bool JSObject::HasFastSmiElements() { + return IsFastSmiElementsKind(GetElementsKind()); } -bool JSObject::HasFastTypeElements() { - ElementsKind elements_kind = GetElementsKind(); - return elements_kind == FAST_SMI_ONLY_ELEMENTS || - elements_kind == FAST_ELEMENTS; +bool JSObject::HasFastSmiOrObjectElements() { + return IsFastSmiOrObjectElementsKind(GetElementsKind()); } bool JSObject::HasFastDoubleElements() { - return GetElementsKind() == FAST_DOUBLE_ELEMENTS; + return IsFastDoubleElementsKind(GetElementsKind()); +} + + +bool JSObject::HasFastHoleyElements() { + return IsFastHoleyElementsKind(GetElementsKind()); } @@ -4448,7 +4471,7 @@ bool JSObject::HasIndexedInterceptor() { MaybeObject* JSObject::EnsureWritableFastElements() { - ASSERT(HasFastTypeElements()); + ASSERT(HasFastSmiOrObjectElements()); FixedArray* elems = FixedArray::cast(elements()); Isolate* isolate = GetIsolate(); if (elems->map() 
!= isolate->heap()->fixed_cow_array_map()) return elems; @@ -4806,7 +4829,7 @@ void Map::ClearCodeCache(Heap* heap) { void JSArray::EnsureSize(int required_size) { - ASSERT(HasFastTypeElements()); + ASSERT(HasFastSmiOrObjectElements()); FixedArray* elts = FixedArray::cast(elements()); const int kArraySizeThatFitsComfortablyInNewSpace = 128; if (elts->length() < required_size) { @@ -4838,13 +4861,13 @@ bool JSArray::AllowsSetElementsLength() { MaybeObject* JSArray::SetContent(FixedArrayBase* storage) { MaybeObject* maybe_result = EnsureCanContainElements( - storage, ALLOW_COPIED_DOUBLE_ELEMENTS); + storage, storage->length(), ALLOW_COPIED_DOUBLE_ELEMENTS); if (maybe_result->IsFailure()) return maybe_result; ASSERT((storage->map() == GetHeap()->fixed_double_array_map() && - GetElementsKind() == FAST_DOUBLE_ELEMENTS) || + IsFastDoubleElementsKind(GetElementsKind())) || ((storage->map() != GetHeap()->fixed_double_array_map()) && - ((GetElementsKind() == FAST_ELEMENTS) || - (GetElementsKind() == FAST_SMI_ONLY_ELEMENTS && + (IsFastObjectElementsKind(GetElementsKind()) || + (IsFastSmiElementsKind(GetElementsKind()) && FixedArray::cast(storage)->ContainsOnlySmisOrHoles())))); set_elements(storage); set_length(Smi::FromInt(storage->length())); diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc index febdaab..3aea5f0 100644 --- a/deps/v8/src/objects-printer.cc +++ b/deps/v8/src/objects-printer.cc @@ -318,7 +318,9 @@ void JSObject::PrintElements(FILE* out) { // Don't call GetElementsKind, its validation code can cause the printer to // fail when debugging. switch (map()->elements_kind()) { - case FAST_SMI_ONLY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: case FAST_ELEMENTS: { // Print in array notation for non-sparse arrays. FixedArray* p = FixedArray::cast(elements()); @@ -329,6 +331,7 @@ void JSObject::PrintElements(FILE* out) { } break; } + case FAST_HOLEY_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: { // Print in array notation for non-sparse arrays. if (elements()->length() > 0) { diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc index 7f75611..ba460cf 100644 --- a/deps/v8/src/objects.cc +++ b/deps/v8/src/objects.cc @@ -56,11 +56,6 @@ namespace v8 { namespace internal { -void PrintElementsKind(FILE* out, ElementsKind kind) { - ElementsAccessor* accessor = ElementsAccessor::ForKind(kind); - PrintF(out, "%s", accessor->name()); -} - MUST_USE_RESULT static MaybeObject* CreateJSValue(JSFunction* constructor, Object* value) { @@ -543,7 +538,7 @@ bool JSObject::IsDirty() { // If the object is fully fast case and has the same map it was // created with then no changes can have been made to it. return map() != fun->initial_map() - || !HasFastElements() + || !HasFastObjectElements() || !HasFastProperties(); } @@ -1067,7 +1062,9 @@ void String::StringShortPrint(StringStream* accumulator) { void JSObject::JSObjectShortPrint(StringStream* accumulator) { switch (map()->instance_type()) { case JS_ARRAY_TYPE: { - double length = JSArray::cast(this)->length()->Number(); + double length = JSArray::cast(this)->length()->IsUndefined() + ? 
0 + : JSArray::cast(this)->length()->Number(); accumulator->Add("<JS Array[%u]>", static_cast<uint32_t>(length)); break; } @@ -2202,34 +2199,29 @@ static Handle<T> MaybeNull(T* p) { Handle<Map> Map::FindTransitionedMap(MapHandleList* candidates) { - ElementsKind elms_kind = elements_kind(); - if (elms_kind == FAST_DOUBLE_ELEMENTS) { - bool dummy = true; - Handle<Map> fast_map = - MaybeNull(LookupElementsTransitionMap(FAST_ELEMENTS, &dummy)); - if (!fast_map.is_null() && ContainsMap(candidates, fast_map)) { - return fast_map; - } - return Handle<Map>::null(); - } - if (elms_kind == FAST_SMI_ONLY_ELEMENTS) { - bool dummy = true; - Handle<Map> double_map = - MaybeNull(LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, &dummy)); - // In the current implementation, if the DOUBLE map doesn't exist, the - // FAST map can't exist either. - if (double_map.is_null()) return Handle<Map>::null(); - Handle<Map> fast_map = - MaybeNull(double_map->LookupElementsTransitionMap(FAST_ELEMENTS, - &dummy)); - if (!fast_map.is_null() && ContainsMap(candidates, fast_map)) { - return fast_map; - } - if (ContainsMap(candidates, double_map)) return double_map; - } - return Handle<Map>::null(); + ElementsKind kind = elements_kind(); + Handle<Map> transitioned_map = Handle<Map>::null(); + Handle<Map> current_map(this); + bool packed = IsFastPackedElementsKind(kind); + if (IsTransitionableFastElementsKind(kind)) { + while (CanTransitionToMoreGeneralFastElementsKind(kind, false)) { + kind = GetNextMoreGeneralFastElementsKind(kind, false); + bool dummy = true; + Handle<Map> maybe_transitioned_map = + MaybeNull(current_map->LookupElementsTransitionMap(kind, &dummy)); + if (maybe_transitioned_map.is_null()) break; + if (ContainsMap(candidates, maybe_transitioned_map) && + (packed || !IsFastPackedElementsKind(kind))) { + transitioned_map = maybe_transitioned_map; + if (!IsFastPackedElementsKind(kind)) packed = false; + } + current_map = maybe_transitioned_map; + } + } + return transitioned_map; } + static Map* GetElementsTransitionMapFromDescriptor(Object* descriptor_contents, ElementsKind elements_kind) { if (descriptor_contents->IsMap()) { @@ -2338,24 +2330,36 @@ Object* Map::GetDescriptorContents(String* sentinel_name, } -Map* Map::LookupElementsTransitionMap(ElementsKind elements_kind, +Map* Map::LookupElementsTransitionMap(ElementsKind to_kind, bool* safe_to_add_transition) { - // Special case: indirect SMI->FAST transition (cf. comment in - // AddElementsTransition()). - if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS && - elements_kind == FAST_ELEMENTS) { - Map* double_map = this->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, - safe_to_add_transition); - if (double_map == NULL) return double_map; - return double_map->LookupElementsTransitionMap(FAST_ELEMENTS, + ElementsKind from_kind = elements_kind(); + if (IsFastElementsKind(from_kind) && IsFastElementsKind(to_kind)) { + if (!IsMoreGeneralElementsKindTransition(from_kind, to_kind)) { + if (safe_to_add_transition) *safe_to_add_transition = false; + return NULL; + } + ElementsKind transitioned_from_kind = + GetNextMoreGeneralFastElementsKind(from_kind, false); + + // If the transition is a single step in the transition sequence, fall + // through to looking it up and returning it. If it requires several steps, + // divide and conquer. + if (transitioned_from_kind != to_kind) {
+ Map* from_map = LookupElementsTransitionMap(transitioned_from_kind, + safe_to_add_transition); + if (from_map == NULL) return NULL; + return from_map->LookupElementsTransitionMap(to_kind, safe_to_add_transition); + } } Object* descriptor_contents = GetDescriptorContents( elements_transition_sentinel_name(), safe_to_add_transition); if (descriptor_contents != NULL) { Map* maybe_transition_map = GetElementsTransitionMapFromDescriptor(descriptor_contents, - elements_kind); + to_kind); ASSERT(maybe_transition_map == NULL || maybe_transition_map->IsMap()); return maybe_transition_map; } @@ -2363,29 +2367,35 @@ Map* Map::LookupElementsTransitionMap(ElementsKind elements_kind, } -MaybeObject* Map::AddElementsTransition(ElementsKind elements_kind, +MaybeObject* Map::AddElementsTransition(ElementsKind to_kind, Map* transitioned_map) { - // The map transition graph should be a tree, therefore the transition - // from SMI to FAST elements is not done directly, but by going through - // DOUBLE elements first. - if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS && - elements_kind == FAST_ELEMENTS) { - bool safe_to_add = true; - Map* double_map = this->LookupElementsTransitionMap( - FAST_DOUBLE_ELEMENTS, &safe_to_add); - // This method is only called when safe_to_add_transition has been found - // to be true earlier. - ASSERT(safe_to_add); - - if (double_map == NULL) { - MaybeObject* maybe_map = this->CopyDropTransitions(); - if (!maybe_map->To(&double_map)) return maybe_map; - double_map->set_elements_kind(FAST_DOUBLE_ELEMENTS); - MaybeObject* maybe_double_transition = this->AddElementsTransition( - FAST_DOUBLE_ELEMENTS, double_map); - if (maybe_double_transition->IsFailure()) return maybe_double_transition; - } - return double_map->AddElementsTransition(FAST_ELEMENTS, transitioned_map); + ElementsKind from_kind = elements_kind(); + if (IsFastElementsKind(from_kind) && IsFastElementsKind(to_kind)) { + ASSERT(IsMoreGeneralElementsKindTransition(from_kind, to_kind)); + ElementsKind transitioned_from_kind = + GetNextMoreGeneralFastElementsKind(from_kind, false); + // The map transitions graph should be a tree, therefore transitions to + // ElementsKind that are not adjacent in the ElementsKind sequence are not + // done directly, but instead by going through intermediate ElementsKinds + // first. + if (to_kind != transitioned_from_kind) { + bool safe_to_add = true; + Map* intermediate_map = LookupElementsTransitionMap( + transitioned_from_kind, &safe_to_add); + // This method is only called when safe_to_add has been found to be true + // earlier. + ASSERT(safe_to_add); + + if (intermediate_map == NULL) { + MaybeObject* maybe_map = CopyDropTransitions(); + if (!maybe_map->To(&intermediate_map)) return maybe_map; + intermediate_map->set_elements_kind(transitioned_from_kind); + MaybeObject* maybe_transition = AddElementsTransition( + transitioned_from_kind, intermediate_map); + if (maybe_transition->IsFailure()) return maybe_transition; + } + return intermediate_map->AddElementsTransition(to_kind, transitioned_map); + } } bool safe_to_add_transition = true; @@ -2437,10 +2447,11 @@ MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) { !current_map->IsUndefined() && !current_map->is_shared(); - // Prevent long chains of DICTIONARY -> FAST_ELEMENTS maps caused by objects + // Prevent long chains of DICTIONARY -> FAST_*_ELEMENTS maps caused by objects // with elements that switch back and forth between dictionary and fast - // element mode. 
- if (from_kind == DICTIONARY_ELEMENTS && to_kind == FAST_ELEMENTS) { + // element modes. + if (from_kind == DICTIONARY_ELEMENTS && + IsFastElementsKind(to_kind)) { safe_to_add_transition = false; } @@ -2967,12 +2978,18 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* result, // Preserve the attributes of this existing property. attributes = result->GetAttributes(); return ConvertDescriptorToField(name, value, attributes); - case CALLBACKS: - return SetPropertyWithCallback(result->GetCallbackObject(), + case CALLBACKS: { + Object* callback_object = result->GetCallbackObject(); + if (callback_object->IsAccessorPair() && + !AccessorPair::cast(callback_object)->ContainsAccessor()) { + return ConvertDescriptorToField(name, value, attributes); + } + return SetPropertyWithCallback(callback_object, name, value, result->holder(), strict_mode); + } case INTERCEPTOR: return SetPropertyWithInterceptor(name, value, attributes, strict_mode); case CONSTANT_TRANSITION: { @@ -3476,8 +3493,7 @@ MaybeObject* JSObject::NormalizeElements() { } if (array->IsDictionary()) return array; - ASSERT(HasFastElements() || - HasFastSmiOnlyElements() || + ASSERT(HasFastSmiOrObjectElements() || HasFastDoubleElements() || HasFastArgumentsElements()); // Compute the effective length and allocate a new backing store. @@ -3512,8 +3528,7 @@ MaybeObject* JSObject::NormalizeElements() { if (!maybe_value_object->ToObject(&value)) return maybe_value_object; } } else { - ASSERT(old_map->has_fast_elements() || - old_map->has_fast_smi_only_elements()); + ASSERT(old_map->has_fast_smi_or_object_elements()); value = FixedArray::cast(array)->get(i); } PropertyDetails details = PropertyDetails(NONE, NORMAL); @@ -4000,9 +4015,9 @@ MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) { bool JSObject::ReferencesObjectFromElements(FixedArray* elements, ElementsKind kind, Object* object) { - ASSERT(kind == FAST_ELEMENTS || + ASSERT(IsFastObjectElementsKind(kind) || kind == DICTIONARY_ELEMENTS); - if (kind == FAST_ELEMENTS) { + if (IsFastObjectElementsKind(kind)) { int length = IsJSArray() ? Smi::cast(JSArray::cast(this)->length())->value() : elements->length(); @@ -4054,12 +4069,15 @@ bool JSObject::ReferencesObject(Object* obj) { case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: // Raw pixels and external arrays do not reference other // objects. break; - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: break; case FAST_ELEMENTS: + case FAST_HOLEY_ELEMENTS: case DICTIONARY_ELEMENTS: { FixedArray* elements = FixedArray::cast(this->elements()); if (ReferencesObjectFromElements(elements, kind, obj)) return true; @@ -4075,7 +4093,8 @@ bool JSObject::ReferencesObject(Object* obj) { } // Check the arguments. FixedArray* arguments = FixedArray::cast(parameter_map->get(1)); - kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS : FAST_ELEMENTS; + kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS : + FAST_HOLEY_ELEMENTS; if (ReferencesObjectFromElements(arguments, kind, obj)) return true; break; } @@ -4309,7 +4328,7 @@ void JSReceiver::Lookup(String* name, LookupResult* result) { } -// Search object and it's prototype chain for callback properties. +// Search object and its prototype chain for callback properties. 
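// [Illustrative sketch, editorial] With the holey kinds, a fast object
// backing store may contain hole sentinels, which is why
// ReferencesObjectFromElements above now accepts any fast object kind and
// why aliased arguments fall back to FAST_HOLEY_ELEMENTS. An identity scan
// stays correct because a hole never compares equal to a real object; the
// toy model below uses nullptr for the hole (V8 uses a dedicated oddball):
#include <cstddef>

bool StoreReferencesObject(void* const* elements, size_t length,
                           void* object) {
  for (size_t i = 0; i < length; ++i) {
    if (elements[i] == object) return true;  // holes (nullptr) never match
  }
  return false;
}

int main() {
  int a = 0, b = 0;
  void* store[4] = {&a, nullptr, nullptr, &b};  // a holey backing store
  return StoreReferencesObject(store, 4, &b) ? 0 : 1;
}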
void JSObject::LookupCallback(String* name, LookupResult* result) { Heap* heap = GetHeap(); for (Object* current = this; @@ -4353,9 +4372,12 @@ MaybeObject* JSObject::DefineElementAccessor(uint32_t index, Object* setter, PropertyAttributes attributes) { switch (GetElementsKind()) { - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: break; case EXTERNAL_PIXEL_ELEMENTS: case EXTERNAL_BYTE_ELEMENTS: @@ -4472,7 +4494,7 @@ bool JSObject::CanSetCallback(String* name) { GetIsolate()->MayNamedAccess(this, name, v8::ACCESS_SET)); // Check if there is an API defined callback object which prohibits - // callback overwriting in this object or it's prototype chain. + // callback overwriting in this object or its prototype chain. // This mechanism is needed for instance in a browser setting, where // certain accessors such as window.location should not be allowed // to be overwritten because allowing overwriting could potentially @@ -4730,7 +4752,7 @@ MaybeObject* JSObject::DefineFastAccessor(String* name, // If the property is not a JavaScript accessor, fall back to the slow case. if (result.type() != CALLBACKS) return GetHeap()->null_value(); - Object* callback_value = result.GetValue(); + Object* callback_value = result.GetCallbackObject(); if (!callback_value->IsAccessorPair()) return GetHeap()->null_value(); AccessorPair* accessors = AccessorPair::cast(callback_value); @@ -4796,9 +4818,12 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) { // Accessors overwrite previous callbacks (cf. with getters/setters). switch (GetElementsKind()) { - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: break; case EXTERNAL_PIXEL_ELEMENTS: case EXTERNAL_BYTE_ELEMENTS: @@ -5915,21 +5940,15 @@ MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor, int index = Search(descriptor->GetKey()); const bool replacing = (index != kNotFound); bool keep_enumeration_index = false; - if (replacing) { - // We are replacing an existing descriptor. We keep the enumeration - // index of a visible property. - PropertyType t = GetDetails(index).type(); - if (t == CONSTANT_FUNCTION || - t == FIELD || - t == CALLBACKS || - t == INTERCEPTOR) { - keep_enumeration_index = true; - } else if (remove_transitions) { - // Replaced descriptor has been counted as removed if it is - // a transition that will be replaced. Adjust count in this case. - ++new_size; - } - } else { + if (!replacing) { + ++new_size; + } else if (!IsTransitionOnly(index)) { + // We are replacing an existing descriptor. We keep the enumeration index + // of a visible property. + keep_enumeration_index = true; + } else if (remove_transitions) { + // Replaced descriptor has been counted as removed if it is a transition + // that will be replaced. Adjust count in this case. ++new_size; } @@ -8587,7 +8606,7 @@ void Code::Disassemble(const char* name, FILE* out) { MaybeObject* JSObject::SetFastElementsCapacityAndLength( int capacity, int length, - SetFastElementsCapacityMode set_capacity_mode) { + SetFastElementsCapacitySmiMode smi_mode) { Heap* heap = GetHeap(); // We should never end in here with a pixel or external array. 
ASSERT(!HasExternalArrayElements()); @@ -8598,34 +8617,40 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength( if (!maybe->To(&new_elements)) return maybe; } - // Find the new map to use for this object if there is a map change. - Map* new_map = NULL; - if (elements()->map() != heap->non_strict_arguments_elements_map()) { - // The resized array has FAST_SMI_ONLY_ELEMENTS if the capacity mode forces - // it, or if it's allowed and the old elements array contained only SMIs. - bool has_fast_smi_only_elements = - (set_capacity_mode == kForceSmiOnlyElements) || - ((set_capacity_mode == kAllowSmiOnlyElements) && - (elements()->map()->has_fast_smi_only_elements() || - elements() == heap->empty_fixed_array())); - ElementsKind elements_kind = has_fast_smi_only_elements - ? FAST_SMI_ONLY_ELEMENTS - : FAST_ELEMENTS; - MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(), elements_kind); - if (!maybe->To(&new_map)) return maybe; + ElementsKind elements_kind = GetElementsKind(); + ElementsKind new_elements_kind; + // The resized array has FAST_*_SMI_ELEMENTS if the capacity mode forces it, + // or if it's allowed and the old elements array contained only SMIs. + bool has_fast_smi_elements = + (smi_mode == kForceSmiElements) || + ((smi_mode == kAllowSmiElements) && HasFastSmiElements()); + if (has_fast_smi_elements) { + if (IsHoleyElementsKind(elements_kind)) { + new_elements_kind = FAST_HOLEY_SMI_ELEMENTS; + } else { + new_elements_kind = FAST_SMI_ELEMENTS; + } + } else { + if (IsHoleyElementsKind(elements_kind)) { + new_elements_kind = FAST_HOLEY_ELEMENTS; + } else { + new_elements_kind = FAST_ELEMENTS; + } } - FixedArrayBase* old_elements = elements(); - ElementsKind elements_kind = GetElementsKind(); ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind); - ElementsKind to_kind = (elements_kind == FAST_SMI_ONLY_ELEMENTS) - ? FAST_SMI_ONLY_ELEMENTS - : FAST_ELEMENTS; { MaybeObject* maybe_obj = - accessor->CopyElements(this, new_elements, to_kind); + accessor->CopyElements(this, new_elements, new_elements_kind); if (maybe_obj->IsFailure()) return maybe_obj; } if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) { + Map* new_map = map(); + if (new_elements_kind != elements_kind) { + MaybeObject* maybe = + GetElementsTransitionMap(GetIsolate(), new_elements_kind); + if (!maybe->To(&new_map)) return maybe; + } + ValidateElements(); set_map_and_elements(new_map, new_elements); } else { FixedArray* parameter_map = FixedArray::cast(old_elements); @@ -8637,11 +8662,9 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength( GetElementsKind(), new_elements); } - // Update the length if necessary. 
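// [Illustrative sketch, editorial] The new-kind selection above, factored
// out: the resized store keeps a SMI kind only when the mode forces it, or
// allows it and the old store was all SMIs, and the holeyness of the current
// kind is always preserved. Enum values are simplified stand-ins:
enum KindSketch {
  FAST_SMI_SK, FAST_HOLEY_SMI_SK, FAST_OBJECT_SK, FAST_HOLEY_OBJECT_SK
};
enum SmiModeSketch { kAllowSmiSk, kForceSmiSk, kDontAllowSmiSk };

KindSketch NewKindAfterResize(SmiModeSketch mode, bool had_smi_elements,
                              bool was_holey) {
  bool smi = (mode == kForceSmiSk) ||
             (mode == kAllowSmiSk && had_smi_elements);
  if (smi) return was_holey ? FAST_HOLEY_SMI_SK : FAST_SMI_SK;
  return was_holey ? FAST_HOLEY_OBJECT_SK : FAST_OBJECT_SK;
}

int main() {
  // Growing a holey SMI-only array under kAllowSmi keeps it holey SMI.
  return NewKindAfterResize(kAllowSmiSk, true, true) == FAST_HOLEY_SMI_SK
             ? 0 : 1;
}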
if (IsJSArray()) { JSArray::cast(this)->set_length(Smi::FromInt(length)); } - return new_elements; } @@ -8659,20 +8682,28 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength( if (!maybe_obj->To(&elems)) return maybe_obj; } + ElementsKind elements_kind = GetElementsKind(); + ElementsKind new_elements_kind = elements_kind; + if (IsHoleyElementsKind(elements_kind)) { + new_elements_kind = FAST_HOLEY_DOUBLE_ELEMENTS; + } else { + new_elements_kind = FAST_DOUBLE_ELEMENTS; + } + Map* new_map; { MaybeObject* maybe_obj = - GetElementsTransitionMap(heap->isolate(), FAST_DOUBLE_ELEMENTS); + GetElementsTransitionMap(heap->isolate(), new_elements_kind); if (!maybe_obj->To(&new_map)) return maybe_obj; } FixedArrayBase* old_elements = elements(); - ElementsKind elements_kind = GetElementsKind(); ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind); { MaybeObject* maybe_obj = accessor->CopyElements(this, elems, FAST_DOUBLE_ELEMENTS); if (maybe_obj->IsFailure()) return maybe_obj; } if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) { + ValidateElements(); set_map_and_elements(new_map, elems); } else { FixedArray* parameter_map = FixedArray::cast(old_elements); @@ -8681,7 +8712,7 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength( if (FLAG_trace_elements_transitions) { PrintElementsTransition(stdout, elements_kind, old_elements, - FAST_DOUBLE_ELEMENTS, elems); + GetElementsKind(), elems); } if (IsJSArray()) { @@ -8961,8 +8992,10 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) { } switch (GetElementsKind()) { - case FAST_SMI_ONLY_ELEMENTS: - case FAST_ELEMENTS: { + case FAST_SMI_ELEMENTS: + case FAST_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: { uint32_t length = IsJSArray() ? static_cast (Smi::cast(JSArray::cast(this)->length())->value()) : @@ -8973,7 +9006,8 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) { } break; } - case FAST_DOUBLE_ELEMENTS: { + case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: { uint32_t length = IsJSArray() ? static_cast (Smi::cast(JSArray::cast(this)->length())->value()) : @@ -9257,7 +9291,7 @@ MaybeObject* JSObject::SetFastElement(uint32_t index, Object* value, StrictModeFlag strict_mode, bool check_prototype) { - ASSERT(HasFastTypeElements() || + ASSERT(HasFastSmiOrObjectElements() || HasFastArgumentsElements()); FixedArray* backing_store = FixedArray::cast(elements()); @@ -9283,13 +9317,29 @@ MaybeObject* JSObject::SetFastElement(uint32_t index, // Check if the length property of this object needs to be updated. uint32_t array_length = 0; bool must_update_array_length = false; + bool introduces_holes = true; if (IsJSArray()) { CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length)); + introduces_holes = index > array_length; if (index >= array_length) { must_update_array_length = true; array_length = index + 1; } + } else { + introduces_holes = index >= capacity; } + + // If the array is growing, and it's not growth by a single element at the + // end, make sure that the ElementsKind is HOLEY. 
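// [Illustrative sketch, editorial] The introduces_holes test computed above:
// writing exactly at a JSArray's current length is an append and keeps a
// packed kind packed, while any store past it leaves a gap; non-arrays have
// no length property, so they compare against the store's capacity instead.
#include <cstdint>

bool IntroducesHoles(bool is_js_array, uint32_t index,
                     uint32_t array_length, uint32_t capacity) {
  return is_js_array ? index > array_length : index >= capacity;
}

int main() {
  // With length 3: a[3] = x appends, a[5] = x punches holes.
  return (!IntroducesHoles(true, 3, 3, 8) && IntroducesHoles(true, 5, 3, 8))
             ? 0 : 1;
}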
+ ElementsKind elements_kind = GetElementsKind(); + if (introduces_holes && + IsFastElementsKind(elements_kind) && + !IsFastHoleyElementsKind(elements_kind)) { + ElementsKind transitioned_kind = GetHoleyElementsKind(elements_kind); + MaybeObject* maybe = TransitionElementsKind(transitioned_kind); + if (maybe->IsFailure()) return maybe; + } + // Check if the capacity of the backing store needs to be increased, or if // a transition to slow elements is necessary. if (index >= capacity) { @@ -9309,42 +9359,44 @@ MaybeObject* JSObject::SetFastElement(uint32_t index, } } // Convert to fast double elements if appropriate. - if (HasFastSmiOnlyElements() && !value->IsSmi() && value->IsNumber()) { + if (HasFastSmiElements() && !value->IsSmi() && value->IsNumber()) { MaybeObject* maybe = SetFastDoubleElementsCapacityAndLength(new_capacity, array_length); if (maybe->IsFailure()) return maybe; FixedDoubleArray::cast(elements())->set(index, value->Number()); + ValidateElements(); return value; } - // Change elements kind from SMI_ONLY to generic FAST if necessary. - if (HasFastSmiOnlyElements() && !value->IsSmi()) { + // Change elements kind from Smi-only to generic FAST if necessary. + if (HasFastSmiElements() && !value->IsSmi()) { Map* new_map; - { MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(), - FAST_ELEMENTS); - if (!maybe_new_map->To(&new_map)) return maybe_new_map; - } + ElementsKind kind = HasFastHoleyElements() + ? FAST_HOLEY_ELEMENTS + : FAST_ELEMENTS; + MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(), + kind); + if (!maybe_new_map->To(&new_map)) return maybe_new_map; + set_map(new_map); - if (FLAG_trace_elements_transitions) { - PrintElementsTransition(stdout, FAST_SMI_ONLY_ELEMENTS, elements(), - FAST_ELEMENTS, elements()); - } } // Increase backing store capacity if that's been decided previously. if (new_capacity != capacity) { FixedArray* new_elements; - SetFastElementsCapacityMode set_capacity_mode = - value->IsSmi() && HasFastSmiOnlyElements() - ? kAllowSmiOnlyElements - : kDontAllowSmiOnlyElements; + SetFastElementsCapacitySmiMode smi_mode = + value->IsSmi() && HasFastSmiElements() + ? kAllowSmiElements + : kDontAllowSmiElements; { MaybeObject* maybe = SetFastElementsCapacityAndLength(new_capacity, array_length, - set_capacity_mode); + smi_mode); if (!maybe->To(&new_elements)) return maybe; } new_elements->set(index, value); + ValidateElements(); return value; } + // Finally, set the new element and length. ASSERT(elements()->IsFixedArray()); backing_store->set(index, value); @@ -9468,20 +9520,21 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index, } else { new_length = dictionary->max_number_key() + 1; } - SetFastElementsCapacityMode set_capacity_mode = FLAG_smi_only_arrays - ? kAllowSmiOnlyElements - : kDontAllowSmiOnlyElements; + SetFastElementsCapacitySmiMode smi_mode = FLAG_smi_only_arrays + ? kAllowSmiElements + : kDontAllowSmiElements; bool has_smi_only_elements = false; bool should_convert_to_fast_double_elements = ShouldConvertToFastDoubleElements(&has_smi_only_elements); if (has_smi_only_elements) { - set_capacity_mode = kForceSmiOnlyElements; + smi_mode = kForceSmiElements; } MaybeObject* result = should_convert_to_fast_double_elements ? 
SetFastDoubleElementsCapacityAndLength(new_length, new_length) : SetFastElementsCapacityAndLength(new_length, new_length, - set_capacity_mode); + smi_mode); + ValidateElements(); if (result->IsFailure()) return result; #ifdef DEBUG if (FLAG_trace_normalization) { @@ -9520,27 +9573,40 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement( // If the value object is not a heap number, switch to fast elements and try // again. bool value_is_smi = value->IsSmi(); + bool introduces_holes = true; + uint32_t length = elms_length; + if (IsJSArray()) { + CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length)); + introduces_holes = index > length; + } else { + introduces_holes = index >= elms_length; + } + if (!value->IsNumber()) { - Object* obj; - uint32_t length = elms_length; - if (IsJSArray()) { - CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length)); - } MaybeObject* maybe_obj = SetFastElementsCapacityAndLength( elms_length, length, - kDontAllowSmiOnlyElements); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - return SetFastElement(index, - value, - strict_mode, - check_prototype); + kDontAllowSmiElements); + if (maybe_obj->IsFailure()) return maybe_obj; + maybe_obj = SetFastElement(index, value, strict_mode, check_prototype); + if (maybe_obj->IsFailure()) return maybe_obj; + ValidateElements(); + return maybe_obj; } double double_value = value_is_smi ? static_cast(Smi::cast(value)->value()) : HeapNumber::cast(value)->value(); + // If the array is growing, and it's not growth by a single element at the + // end, make sure that the ElementsKind is HOLEY. + ElementsKind elements_kind = GetElementsKind(); + if (introduces_holes && !IsFastHoleyElementsKind(elements_kind)) { + ElementsKind transitioned_kind = GetHoleyElementsKind(elements_kind); + MaybeObject* maybe = TransitionElementsKind(transitioned_kind); + if (maybe->IsFailure()) return maybe; + } + // Check whether there is extra space in the fixed array. 
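// [Illustrative sketch, editorial] GetHoleyElementsKind, used above before a
// gap-introducing store, pairs each packed kind with its holey variant while
// leaving the element representation (smi/double/object) unchanged, so no
// backing-store copy is implied by it. Assumed behavior, modeled with
// simplified names:
enum HoleyKindSketch {
  SMI_SK, HOLEY_SMI_SK, DOUBLE_SK, HOLEY_DOUBLE_SK, OBJ_SK, HOLEY_OBJ_SK
};

HoleyKindSketch ToHoley(HoleyKindSketch kind) {
  switch (kind) {
    case SMI_SK:    return HOLEY_SMI_SK;
    case DOUBLE_SK: return HOLEY_DOUBLE_SK;
    case OBJ_SK:    return HOLEY_OBJ_SK;
    default:        return kind;  // already holey
  }
}

int main() { return ToHoley(DOUBLE_SK) == HOLEY_DOUBLE_SK ? 0 : 1; }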
if (index < elms_length) { FixedDoubleArray* elms = FixedDoubleArray::cast(elements()); @@ -9562,13 +9628,11 @@ MUST_USE_RESULT MaybeObject* JSObject::SetFastDoubleElement( int new_capacity = NewElementsCapacity(index+1); if (!ShouldConvertToSlowElements(new_capacity)) { ASSERT(static_cast(new_capacity) > index); - Object* obj; - { MaybeObject* maybe_obj = - SetFastDoubleElementsCapacityAndLength(new_capacity, - index + 1); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } + MaybeObject* maybe_obj = + SetFastDoubleElementsCapacityAndLength(new_capacity, index + 1); + if (maybe_obj->IsFailure()) return maybe_obj; FixedDoubleArray::cast(elements())->set(index, double_value); + ValidateElements(); return value; } } @@ -9712,10 +9776,13 @@ MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index, (attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0); Isolate* isolate = GetIsolate(); switch (GetElementsKind()) { - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: return SetFastElement(index, value, strict_mode, check_prototype); case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: return SetFastDoubleElement(index, value, strict_mode, check_prototype); case EXTERNAL_PIXEL_ELEMENTS: { ExternalPixelArray* pixels = ExternalPixelArray::cast(elements()); @@ -9806,11 +9873,19 @@ Handle JSObject::TransitionElementsKind(Handle object, MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) { ElementsKind from_kind = map()->elements_kind(); + if (IsFastHoleyElementsKind(from_kind)) { + to_kind = GetHoleyElementsKind(to_kind); + } + Isolate* isolate = GetIsolate(); - if ((from_kind == FAST_SMI_ONLY_ELEMENTS || - elements() == isolate->heap()->empty_fixed_array()) && - to_kind == FAST_ELEMENTS) { - ASSERT(from_kind != FAST_ELEMENTS); + if (elements() == isolate->heap()->empty_fixed_array() || + (IsFastSmiOrObjectElementsKind(from_kind) && + IsFastSmiOrObjectElementsKind(to_kind)) || + (from_kind == FAST_DOUBLE_ELEMENTS && + to_kind == FAST_HOLEY_DOUBLE_ELEMENTS)) { + ASSERT(from_kind != TERMINAL_FAST_ELEMENTS_KIND); + // No change is needed to the elements() buffer, the transition + // only requires a map change. 
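// [Illustrative sketch, editorial] The condition above, restated: a
// transition can reuse the existing elements() buffer when the store is
// empty, when both kinds use tagged slots (smi-or-object to smi-or-object),
// or when a packed double store is reinterpreted as holey double. Anything
// that changes the slot representation (e.g. smi to unboxed double) must go
// through SetFast*ElementsCapacityAndLength and copy.
bool NeedsOnlyMapChange(bool store_is_empty,
                        bool from_is_tagged, bool to_is_tagged,
                        bool from_is_packed_double, bool to_is_holey_double) {
  return store_is_empty ||
         (from_is_tagged && to_is_tagged) ||
         (from_is_packed_double && to_is_holey_double);
}

int main() {
  // smi -> object reuses the buffer; smi -> double must copy and unbox.
  return (NeedsOnlyMapChange(false, true, true, false, false) &&
          !NeedsOnlyMapChange(false, true, false, false, false)) ? 0 : 1;
}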
MaybeObject* maybe_new_map = GetElementsTransitionMap(isolate, to_kind); Map* new_map; if (!maybe_new_map->To(&new_map)) return maybe_new_map; @@ -9837,18 +9912,21 @@ MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) { } } - if (from_kind == FAST_SMI_ONLY_ELEMENTS && - to_kind == FAST_DOUBLE_ELEMENTS) { + if (IsFastSmiElementsKind(from_kind) && + IsFastDoubleElementsKind(to_kind)) { MaybeObject* maybe_result = SetFastDoubleElementsCapacityAndLength(capacity, length); if (maybe_result->IsFailure()) return maybe_result; + ValidateElements(); return this; } - if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) { + if (IsFastDoubleElementsKind(from_kind) && + IsFastObjectElementsKind(to_kind)) { MaybeObject* maybe_result = SetFastElementsCapacityAndLength( - capacity, length, kDontAllowSmiOnlyElements); + capacity, length, kDontAllowSmiElements); if (maybe_result->IsFailure()) return maybe_result; + ValidateElements(); return this; } @@ -9862,10 +9940,14 @@ MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) { // static bool Map::IsValidElementsTransition(ElementsKind from_kind, ElementsKind to_kind) { - return - (from_kind == FAST_SMI_ONLY_ELEMENTS && - (to_kind == FAST_DOUBLE_ELEMENTS || to_kind == FAST_ELEMENTS)) || - (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS); + // Transitions can't go backwards. + if (!IsMoreGeneralElementsKindTransition(from_kind, to_kind)) { + return false; + } + + // Transitions from HOLEY -> PACKED are not allowed. + return !IsFastHoleyElementsKind(from_kind) || + IsFastHoleyElementsKind(to_kind); } @@ -9956,8 +10038,10 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) { break; } // Fall through. - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: backing_store = FixedArray::cast(backing_store_base); *capacity = backing_store->length(); for (int i = 0; i < *capacity; ++i) { @@ -9971,7 +10055,8 @@ void JSObject::GetElementsCapacityAndUsage(int* capacity, int* used) { *used = dictionary->NumberOfElements(); break; } - case FAST_DOUBLE_ELEMENTS: { + case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: { FixedDoubleArray* elms = FixedDoubleArray::cast(elements()); *capacity = elms->length(); for (int i = 0; i < *capacity; i++) { @@ -10241,16 +10326,19 @@ bool JSObject::HasRealElementProperty(uint32_t index) { if (this->IsStringObjectWithCharacterAt(index)) return true; switch (GetElementsKind()) { - case FAST_SMI_ONLY_ELEMENTS: - case FAST_ELEMENTS: { - uint32_t length = IsJSArray() ? + case FAST_SMI_ELEMENTS: + case FAST_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: { + uint32_t length = IsJSArray() ? static_cast( Smi::cast(JSArray::cast(this)->length())->value()) : static_cast(FixedArray::cast(elements())->length()); return (index < length) && !FixedArray::cast(elements())->get(index)->IsTheHole(); } - case FAST_DOUBLE_ELEMENTS: { + case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: { uint32_t length = IsJSArray() ? static_cast( Smi::cast(JSArray::cast(this)->length())->value()) : @@ -10450,7 +10538,7 @@ int JSObject::NumberOfLocalElements(PropertyAttributes filter) { int JSObject::NumberOfEnumElements() { // Fast case for objects with no elements. - if (!IsJSValue() && HasFastElements()) { + if (!IsJSValue() && HasFastObjectElements()) { uint32_t length = IsJSArray() ? 
static_cast( Smi::cast(JSArray::cast(this)->length())->value()) : @@ -10466,8 +10554,10 @@ int JSObject::GetLocalElementKeys(FixedArray* storage, PropertyAttributes filter) { int counter = 0; switch (GetElementsKind()) { - case FAST_SMI_ONLY_ELEMENTS: - case FAST_ELEMENTS: { + case FAST_SMI_ELEMENTS: + case FAST_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: { int length = IsJSArray() ? Smi::cast(JSArray::cast(this)->length())->value() : FixedArray::cast(elements())->length(); @@ -10482,7 +10572,8 @@ int JSObject::GetLocalElementKeys(FixedArray* storage, ASSERT(!storage || storage->length() >= counter); break; } - case FAST_DOUBLE_ELEMENTS: { + case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: { int length = IsJSArray() ? Smi::cast(JSArray::cast(this)->length())->value() : FixedDoubleArray::cast(elements())->length(); @@ -11415,10 +11506,9 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) { // Convert to fast elements. Object* obj; - { MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(), - FAST_ELEMENTS); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } + MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(), + FAST_HOLEY_ELEMENTS); + if (!maybe_obj->ToObject(&obj)) return maybe_obj; Map* new_map = Map::cast(obj); PretenureFlag tenure = heap->InNewSpace(this) ? NOT_TENURED: TENURED; @@ -11429,9 +11519,9 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) { } FixedArray* fast_elements = FixedArray::cast(new_array); dict->CopyValuesTo(fast_elements); + ValidateElements(); - set_map(new_map); - set_elements(fast_elements); + set_map_and_elements(new_map, fast_elements); } else if (HasExternalArrayElements()) { // External arrays cannot have holes or undefined elements. return Smi::FromInt(ExternalArray::cast(elements())->length()); @@ -11441,7 +11531,7 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) { if (!maybe_obj->ToObject(&obj)) return maybe_obj; } } - ASSERT(HasFastTypeElements() || HasFastDoubleElements()); + ASSERT(HasFastSmiOrObjectElements() || HasFastDoubleElements()); // Collect holes at the end, undefined before that and the rest at the // start, and return the number of non-hole, non-undefined values. diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index 22993f2..e9dfe6c 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -30,6 +30,7 @@ #include "allocation.h" #include "builtins.h" +#include "elements-kind.h" #include "list.h" #include "property-details.h" #include "smart-array-pointer.h" @@ -131,40 +132,6 @@ namespace v8 { namespace internal { -enum ElementsKind { - // The "fast" kind for elements that only contain SMI values. Must be first - // to make it possible to efficiently check maps for this kind. - FAST_SMI_ONLY_ELEMENTS, - - // The "fast" kind for tagged values. Must be second to make it possible to - // efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind - // together at once. - FAST_ELEMENTS, - - // The "fast" kind for unwrapped, non-tagged double values. - FAST_DOUBLE_ELEMENTS, - - // The "slow" kind. 
- DICTIONARY_ELEMENTS, - NON_STRICT_ARGUMENTS_ELEMENTS, - // The "fast" kind for external arrays - EXTERNAL_BYTE_ELEMENTS, - EXTERNAL_UNSIGNED_BYTE_ELEMENTS, - EXTERNAL_SHORT_ELEMENTS, - EXTERNAL_UNSIGNED_SHORT_ELEMENTS, - EXTERNAL_INT_ELEMENTS, - EXTERNAL_UNSIGNED_INT_ELEMENTS, - EXTERNAL_FLOAT_ELEMENTS, - EXTERNAL_DOUBLE_ELEMENTS, - EXTERNAL_PIXEL_ELEMENTS, - - // Derived constants from ElementsKind - FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS, - LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS, - FIRST_ELEMENTS_KIND = FAST_SMI_ONLY_ELEMENTS, - LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS -}; - enum CompareMapMode { REQUIRE_EXACT_MAP, ALLOW_ELEMENT_TRANSITION_MAPS @@ -175,13 +142,6 @@ enum KeyedAccessGrowMode { ALLOW_JSARRAY_GROWTH }; -const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1; - -void PrintElementsKind(FILE* out, ElementsKind kind); - -inline bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind, - ElementsKind to_kind); - // Setter that skips the write barrier if mode is SKIP_WRITE_BARRIER. enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER }; @@ -1510,13 +1470,19 @@ class JSObject: public JSReceiver { MUST_USE_RESULT inline MaybeObject* ResetElements(); inline ElementsKind GetElementsKind(); inline ElementsAccessor* GetElementsAccessor(); - inline bool HasFastSmiOnlyElements(); - inline bool HasFastElements(); - // Returns if an object has either FAST_ELEMENT or FAST_SMI_ONLY_ELEMENT - // elements. TODO(danno): Rename HasFastTypeElements to HasFastElements() and - // HasFastElements to HasFastObjectElements. - inline bool HasFastTypeElements(); + // Returns true if an object has elements of FAST_SMI_ELEMENTS ElementsKind. + inline bool HasFastSmiElements(); + // Returns true if an object has elements of FAST_ELEMENTS ElementsKind. + inline bool HasFastObjectElements(); + // Returns true if an object has elements of FAST_ELEMENTS or + // FAST_SMI_ONLY_ELEMENTS. + inline bool HasFastSmiOrObjectElements(); + // Returns true if an object has elements of FAST_DOUBLE_ELEMENTS + // ElementsKind. inline bool HasFastDoubleElements(); + // Returns true if an object has elements of FAST_HOLEY_*_ELEMENTS + // ElementsKind. + inline bool HasFastHoleyElements(); inline bool HasNonStrictArgumentsElements(); inline bool HasDictionaryElements(); inline bool HasExternalPixelElements(); @@ -1719,7 +1685,7 @@ class JSObject: public JSReceiver { static Handle DeleteElement(Handle obj, uint32_t index); MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode); - inline void ValidateSmiOnlyElements(); + inline void ValidateElements(); // Makes sure that this object can contain HeapObject as elements. 
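// [Illustrative sketch, editorial] The renamed JSObject predicates above
// (HasFastSmiElements, HasFastObjectElements, HasFastSmiOrObjectElements,
// HasFastHoleyElements) delegate to ElementsKind classifiers that this patch
// moves into elements-kind.h. A self-contained model of those classifiers
// over the six fast kinds (ordering assumed):
enum FastKindSketch {
  kSmiSk, kHoleySmiSk, kDoubleSk, kHoleyDoubleSk, kObjectSk, kHoleyObjectSk
};

bool IsFastSmiKind(FastKindSketch k) {
  return k == kSmiSk || k == kHoleySmiSk;
}
bool IsFastObjectKind(FastKindSketch k) {
  return k == kObjectSk || k == kHoleyObjectSk;
}
bool IsFastSmiOrObjectKind(FastKindSketch k) {
  return IsFastSmiKind(k) || IsFastObjectKind(k);
}
bool IsFastDoubleKind(FastKindSketch k) {
  return k == kDoubleSk || k == kHoleyDoubleSk;
}
bool IsHoleyKind(FastKindSketch k) {
  return k == kHoleySmiSk || k == kHoleyDoubleSk || k == kHoleyObjectSk;
}

int main() { return IsFastSmiOrObjectKind(kHoleySmiSk) ? 0 : 1; }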
MUST_USE_RESULT inline MaybeObject* EnsureCanContainHeapObjectElements(); @@ -1731,6 +1697,7 @@ class JSObject: public JSReceiver { EnsureElementsMode mode); MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements( FixedArrayBase* elements, + uint32_t length, EnsureElementsMode mode); MUST_USE_RESULT MaybeObject* EnsureCanContainElements( Arguments* arguments, @@ -1829,10 +1796,10 @@ class JSObject: public JSReceiver { MUST_USE_RESULT MaybeObject* GetElementWithInterceptor(Object* receiver, uint32_t index); - enum SetFastElementsCapacityMode { - kAllowSmiOnlyElements, - kForceSmiOnlyElements, - kDontAllowSmiOnlyElements + enum SetFastElementsCapacitySmiMode { + kAllowSmiElements, + kForceSmiElements, + kDontAllowSmiElements }; // Replace the elements' backing store with fast elements of the given @@ -1841,7 +1808,7 @@ class JSObject: public JSReceiver { MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength( int capacity, int length, - SetFastElementsCapacityMode set_capacity_mode); + SetFastElementsCapacitySmiMode smi_mode); MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength( int capacity, int length); @@ -4647,17 +4614,21 @@ class Map: public HeapObject { } // Tells whether the instance has fast elements that are only Smis. - inline bool has_fast_smi_only_elements() { - return elements_kind() == FAST_SMI_ONLY_ELEMENTS; + inline bool has_fast_smi_elements() { + return IsFastSmiElementsKind(elements_kind()); } // Tells whether the instance has fast elements. - inline bool has_fast_elements() { - return elements_kind() == FAST_ELEMENTS; + inline bool has_fast_object_elements() { + return IsFastObjectElementsKind(elements_kind()); + } + + inline bool has_fast_smi_or_object_elements() { + return IsFastSmiOrObjectElementsKind(elements_kind()); } inline bool has_fast_double_elements() { - return elements_kind() == FAST_DOUBLE_ELEMENTS; + return IsFastDoubleElementsKind(elements_kind()); } inline bool has_non_strict_arguments_elements() { @@ -4855,6 +4826,14 @@ class Map: public HeapObject { Handle FindTransitionedMap(MapHandleList* candidates); Map* FindTransitionedMap(MapList* candidates); + // Zaps the contents of backing data structures in debug mode. Note that the + // heap verifier (i.e. VerifyMarkingVisitor) relies on zapping of objects + // holding weak references when incremental marking is used, because it also + // iterates over objects that are otherwise unreachable. +#ifdef DEBUG + void ZapInstanceDescriptors(); + void ZapPrototypeTransitions(); +#endif // Dispatched behavior. #ifdef OBJECT_PRINT @@ -4945,25 +4924,31 @@ class Map: public HeapObject { // Bit positions for bit field 2 static const int kIsExtensible = 0; - static const int kFunctionWithPrototype = 1; - static const int kStringWrapperSafeForDefaultValueOf = 2; - static const int kAttachedToSharedFunctionInfo = 3; + static const int kStringWrapperSafeForDefaultValueOf = 1; + static const int kAttachedToSharedFunctionInfo = 2; // No bits can be used after kElementsKindFirstBit, they are all reserved for // storing ElementKind. 
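// [Illustrative sketch, editorial] The bit_field2 repacking in the constants
// just below, worked through: the shift drops from 4 to 3 (the
// kFunctionWithPrototype bit moves to bit field 3) and the kind field widens
// from 4 to 5 bits, presumably because the new holey kinds push the kind
// count past 16. With 3 + 5 = 8 bits the kind field exactly fills the
// remaining byte, matching the "no bits after" comment above. The mask
// arithmetic, checked stand-alone:
#include <cstdint>

const int kShiftSk = 3;
const int kBitCountSk = 5;
// Equivalent to the patch's (-1 << shift) & ((1 << (shift + count)) - 1),
// written in a form with no negative-value shifts:
const unsigned kMaskSk = ((1u << kBitCountSk) - 1) << kShiftSk;

unsigned KindFromBitField2(uint8_t bit_field2) {
  return (bit_field2 & kMaskSk) >> kShiftSk;  // decode the kind field
}

int main() {
  // 5 bits starting at bit 3 -> mask 0b11111000 == 0xF8.
  return (kMaskSk == 0xF8 && KindFromBitField2(0x2B) == 5) ? 0 : 1;
}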
- static const int kElementsKindShift = 4; - static const int kElementsKindBitCount = 4; + static const int kElementsKindShift = 3; + static const int kElementsKindBitCount = 5; // Derived values from bit field 2 static const int kElementsKindMask = (-1 << kElementsKindShift) & ((1 << (kElementsKindShift + kElementsKindBitCount)) - 1); static const int8_t kMaximumBitField2FastElementValue = static_cast( (FAST_ELEMENTS + 1) << Map::kElementsKindShift) - 1; - static const int8_t kMaximumBitField2FastSmiOnlyElementValue = - static_cast((FAST_SMI_ONLY_ELEMENTS + 1) << + static const int8_t kMaximumBitField2FastSmiElementValue = + static_cast((FAST_SMI_ELEMENTS + 1) << + Map::kElementsKindShift) - 1; + static const int8_t kMaximumBitField2FastHoleyElementValue = + static_cast((FAST_HOLEY_ELEMENTS + 1) << + Map::kElementsKindShift) - 1; + static const int8_t kMaximumBitField2FastHoleySmiElementValue = + static_cast((FAST_HOLEY_SMI_ELEMENTS + 1) << Map::kElementsKindShift) - 1; // Bit positions for bit field 3 static const int kIsShared = 0; + static const int kFunctionWithPrototype = 1; // Layout of the default cache. It holds alternating name and code objects. static const int kCodeCacheEntrySize = 2; @@ -7243,6 +7228,10 @@ class SeqAsciiString: public SeqString { unsigned* offset, unsigned chars); +#ifdef DEBUG + void SeqAsciiStringVerify(); +#endif + private: DISALLOW_IMPLICIT_CONSTRUCTORS(SeqAsciiString); }; diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc index 8620519..3a7a973 100644 --- a/deps/v8/src/parser.cc +++ b/deps/v8/src/parser.cc @@ -3767,10 +3767,12 @@ Expression* Parser::ParseArrayLiteral(bool* ok) { Handle object_literals = isolate()->factory()->NewFixedArray(values->length(), TENURED); Handle double_literals; - ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS; + ElementsKind elements_kind = FAST_SMI_ELEMENTS; bool has_only_undefined_values = true; + bool has_hole_values = false; // Fill in the literals. + Heap* heap = isolate()->heap(); bool is_simple = true; int depth = 1; for (int i = 0, n = values->length(); i < n; i++) { @@ -3779,12 +3781,18 @@ Expression* Parser::ParseArrayLiteral(bool* ok) { depth = m_literal->depth() + 1; } Handle boilerplate_value = GetBoilerplateValue(values->at(i)); - if (boilerplate_value->IsUndefined()) { + if (boilerplate_value->IsTheHole()) { + has_hole_values = true; object_literals->set_the_hole(i); if (elements_kind == FAST_DOUBLE_ELEMENTS) { double_literals->set_the_hole(i); } + } else if (boilerplate_value->IsUndefined()) { is_simple = false; + object_literals->set(i, Smi::FromInt(0)); + if (elements_kind == FAST_DOUBLE_ELEMENTS) { + double_literals->set(i, 0); + } } else { // Examine each literal element, and adjust the ElementsKind if the // literal element is not of a type that can be stored in the current @@ -3794,7 +3802,7 @@ Expression* Parser::ParseArrayLiteral(bool* ok) { // ultimately end up in FAST_ELEMENTS. has_only_undefined_values = false; object_literals->set(i, *boilerplate_value); - if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + if (elements_kind == FAST_SMI_ELEMENTS) { // Smi only elements. Notice if a transition to FAST_DOUBLE_ELEMENTS or // FAST_ELEMENTS is required. if (!boilerplate_value->IsSmi()) { @@ -3842,7 +3850,7 @@ Expression* Parser::ParseArrayLiteral(bool* ok) { // elements array to a copy-on-write array. 
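// [Illustrative sketch, editorial] The array-literal scan above, condensed:
// an elision marks the boilerplate holey, an explicit undefined forces the
// non-simple path (with a placeholder written into the literals array), and
// concrete values widen the kind SMI -> DOUBLE -> OBJECT. A simplified
// driver over plain flags (undefined handling omitted):
enum LiteralKindSketch { LIT_SMI, LIT_DOUBLE, LIT_OBJECT };

struct LiteralValueSketch { bool is_hole; bool is_smi; bool is_number; };

LiteralKindSketch ScanLiteral(const LiteralValueSketch* values, int n,
                              bool* has_holes) {
  LiteralKindSketch kind = LIT_SMI;
  *has_holes = false;
  for (int i = 0; i < n; i++) {
    if (values[i].is_hole) { *has_holes = true; continue; }
    if (kind == LIT_SMI && !values[i].is_smi) {
      kind = values[i].is_number ? LIT_DOUBLE : LIT_OBJECT;
    } else if (kind == LIT_DOUBLE && !values[i].is_number) {
      kind = LIT_OBJECT;
    }
  }
  return kind;  // callers then apply GetHoleyElementsKind if *has_holes
}

int main() {
  LiteralValueSketch v[3] = {{false, true, true},    // 1
                             {true, false, false},   // elision
                             {false, false, true}};  // 1.5
  bool holes = false;
  return (ScanLiteral(v, 3, &holes) == LIT_DOUBLE && holes) ? 0 : 1;
}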
if (is_simple && depth == 1 && values->length() > 0 && elements_kind != FAST_DOUBLE_ELEMENTS) { - object_literals->set_map(isolate()->heap()->fixed_cow_array_map()); + object_literals->set_map(heap->fixed_cow_array_map()); } Handle element_values = elements_kind == FAST_DOUBLE_ELEMENTS @@ -3854,6 +3862,10 @@ Expression* Parser::ParseArrayLiteral(bool* ok) { Handle literals = isolate()->factory()->NewFixedArray(2, TENURED); + if (has_hole_values || !FLAG_packed_arrays) { + elements_kind = GetHoleyElementsKind(elements_kind); + } + literals->set(0, Smi::FromInt(elements_kind)); literals->set(1, *element_values); diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc index 224001d..d942d78 100644 --- a/deps/v8/src/platform-posix.cc +++ b/deps/v8/src/platform-posix.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -422,9 +422,9 @@ Socket* POSIXSocket::Accept() const { } int socket; - do + do { socket = accept(socket_, NULL, NULL); - while (socket == -1 && errno == EINTR); + } while (socket == -1 && errno == EINTR); if (socket == -1) { return NULL; @@ -452,10 +452,9 @@ bool POSIXSocket::Connect(const char* host, const char* port) { } // Connect. - do + do { status = connect(socket_, result->ai_addr, result->ai_addrlen); - while (status == -1 && errno == EINTR); - + } while (status == -1 && errno == EINTR); freeaddrinfo(result); return status == 0; } @@ -474,33 +473,29 @@ bool POSIXSocket::Shutdown() { int POSIXSocket::Send(const char* data, int len) const { - int written; - - for (written = 0; written < len; /* empty */) { + if (len <= 0) return 0; + int written = 0; + while (written < len) { int status = send(socket_, data + written, len - written, 0); if (status == 0) { break; } else if (status > 0) { written += status; - } else if (errno == EINTR) { - /* interrupted by signal, retry */ - } else { - return -1; + } else if (errno != EINTR) { + return 0; } } - return written; } int POSIXSocket::Receive(char* data, int len) const { + if (len <= 0) return 0; int status; - - do + do { status = recv(socket_, data, len, 0); - while (status == -1 && errno == EINTR); - - return status; + } while (status == -1 && errno == EINTR); + return (status < 0) ? 0 : status; } diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc index 9e377a1..2473949 100644 --- a/deps/v8/src/platform-win32.cc +++ b/deps/v8/src/platform-win32.cc @@ -1848,14 +1848,26 @@ bool Win32Socket::Shutdown() { int Win32Socket::Send(const char* data, int len) const { - int status = send(socket_, data, len, 0); - return status; + if (len <= 0) return 0; + int written = 0; + while (written < len) { + int status = send(socket_, data + written, len - written, 0); + if (status == 0) { + break; + } else if (status > 0) { + written += status; + } else { + return 0; + } + } + return written; } int Win32Socket::Receive(char* data, int len) const { + if (len <= 0) return 0; int status = recv(socket_, data, len, 0); - return status; + return (status == SOCKET_ERROR) ? 0 : status; } diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h index 168791a..a2ddf7a 100644 --- a/deps/v8/src/platform.h +++ b/deps/v8/src/platform.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. 
All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -653,6 +653,7 @@ class Socket { virtual bool Shutdown() = 0; // Data Transimission + // Return 0 on failure. virtual int Send(const char* data, int len) const = 0; virtual int Receive(char* data, int len) const = 0; diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h index 9afc52f..6c64350 100644 --- a/deps/v8/src/profile-generator-inl.h +++ b/deps/v8/src/profile-generator-inl.h @@ -118,32 +118,12 @@ int HeapEntry::set_children_index(int index) { } -int HeapEntry::set_retainers_index(int index) { - retainers_index_ = index; - int next_index = index + retainers_count_; - retainers_count_ = 0; - return next_index; -} - - HeapGraphEdge** HeapEntry::children_arr() { ASSERT(children_index_ >= 0); return &snapshot_->children()[children_index_]; } -HeapGraphEdge** HeapEntry::retainers_arr() { - ASSERT(retainers_index_ >= 0); - return &snapshot_->retainers()[retainers_index_]; -} - - -HeapEntry* HeapEntry::dominator() const { - ASSERT(dominator_ >= 0); - return &snapshot_->entries()[dominator_]; -} - - SnapshotObjectId HeapObjectsMap::GetNthGcSubrootId(int delta) { return kGcRootsFirstSubrootId + delta * kObjectIdStep; } diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc index c03e526..0fe7499 100644 --- a/deps/v8/src/profile-generator.cc +++ b/deps/v8/src/profile-generator.cc @@ -964,16 +964,10 @@ HeapEntry::HeapEntry(HeapSnapshot* snapshot, const char* name, SnapshotObjectId id, int self_size) - : painted_(false), - user_reachable_(false), - dominator_(kNoEntry), - type_(type), - retainers_count_(0), - retainers_index_(-1), + : type_(type), children_count_(0), children_index_(-1), self_size_(self_size), - retained_size_(0), id_(id), snapshot_(snapshot), name_(name) { } @@ -985,7 +979,6 @@ void HeapEntry::SetNamedReference(HeapGraphEdge::Type type, HeapGraphEdge edge(type, name, this->index(), entry->index()); snapshot_->edges().Add(edge); ++children_count_; - ++entry->retainers_count_; } @@ -995,7 +988,6 @@ void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type, HeapGraphEdge edge(type, index, this->index(), entry->index()); snapshot_->edges().Add(edge); ++children_count_; - ++entry->retainers_count_; } @@ -1007,9 +999,8 @@ Handle HeapEntry::GetHeapObject() { void HeapEntry::Print( const char* prefix, const char* edge_name, int max_depth, int indent) { STATIC_CHECK(sizeof(unsigned) == sizeof(id())); - OS::Print("%6d %7d @%6u %*c %s%s: ", - self_size(), retained_size(), id(), - indent, ' ', prefix, edge_name); + OS::Print("%6d @%6u %*c %s%s: ", + self_size(), id(), indent, ' ', prefix, edge_name); if (type() != kString) { OS::Print("%s %.40s\n", TypeAsString(), name_); } else { @@ -1091,13 +1082,13 @@ template struct SnapshotSizeConstants; template <> struct SnapshotSizeConstants<4> { static const int kExpectedHeapGraphEdgeSize = 12; - static const int kExpectedHeapEntrySize = 40; + static const int kExpectedHeapEntrySize = 24; static const size_t kMaxSerializableSnapshotRawSize = 256 * MB; }; template <> struct SnapshotSizeConstants<8> { static const int kExpectedHeapGraphEdgeSize = 24; - static const int kExpectedHeapEntrySize = 48; + static const int kExpectedHeapEntrySize = 32; static const uint64_t kMaxSerializableSnapshotRawSize = static_cast(6000) * MB; }; @@ -1139,16 +1130,6 @@ void HeapSnapshot::RememberLastJSObjectId() { } -static void 
HeapEntryClearPaint(HeapEntry* entry_ptr) { - entry_ptr->clear_paint(); -} - - -void HeapSnapshot::ClearPaint() { - entries_.Iterate(HeapEntryClearPaint); -} - - HeapEntry* HeapSnapshot::AddRootEntry() { ASSERT(root_index_ == HeapEntry::kNoEntry); ASSERT(entries_.is_empty()); // Root entry must be the first one. @@ -1196,32 +1177,19 @@ HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type, } -void HeapSnapshot::FillChildrenAndRetainers() { +void HeapSnapshot::FillChildren() { ASSERT(children().is_empty()); children().Allocate(edges().length()); - ASSERT(retainers().is_empty()); - retainers().Allocate(edges().length()); int children_index = 0; - int retainers_index = 0; for (int i = 0; i < entries().length(); ++i) { HeapEntry* entry = &entries()[i]; children_index = entry->set_children_index(children_index); - retainers_index = entry->set_retainers_index(retainers_index); } ASSERT(edges().length() == children_index); - ASSERT(edges().length() == retainers_index); for (int i = 0; i < edges().length(); ++i) { HeapGraphEdge* edge = &edges()[i]; edge->ReplaceToIndexWithEntry(this); edge->from()->add_child(edge); - edge->to()->add_retainer(edge); - } -} - - -void HeapSnapshot::SetDominatorsToSelf() { - for (int i = 0; i < entries_.length(); ++i) { - entries_[i].set_dominator(&entries_[i]); } } @@ -1284,7 +1252,6 @@ size_t HeapSnapshot::RawSnapshotSize() const { GetMemoryUsedByList(entries_) + GetMemoryUsedByList(edges_) + GetMemoryUsedByList(children_) + - GetMemoryUsedByList(retainers_) + GetMemoryUsedByList(sorted_entries_); } @@ -2240,7 +2207,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) { void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) { - if (js_obj->HasFastElements()) { + if (js_obj->HasFastObjectElements()) { FixedArray* elements = FixedArray::cast(js_obj->elements()); int length = js_obj->IsJSArray() ? Smi::cast(JSArray::cast(js_obj)->length())->value() : @@ -3091,12 +3058,9 @@ bool HeapSnapshotGenerator::GenerateSnapshot() { if (!FillReferences()) return false; - snapshot_->FillChildrenAndRetainers(); + snapshot_->FillChildren(); snapshot_->RememberLastJSObjectId(); - if (!SetEntriesDominators()) return false; - if (!CalculateRetainedSizes()) return false; - progress_counter_ = progress_total_; if (!ProgressReport(true)) return false; return true; @@ -3138,187 +3102,6 @@ bool HeapSnapshotGenerator::FillReferences() { } -bool HeapSnapshotGenerator::IsUserGlobalReference(const HeapGraphEdge* edge) { - ASSERT(edge->from() == snapshot_->root()); - return edge->type() == HeapGraphEdge::kShortcut; -} - - -void HeapSnapshotGenerator::MarkUserReachableObjects() { - List worklist; - - Vector children = snapshot_->root()->children(); - for (int i = 0; i < children.length(); ++i) { - if (IsUserGlobalReference(children[i])) { - worklist.Add(children[i]->to()); - } - } - - while (!worklist.is_empty()) { - HeapEntry* entry = worklist.RemoveLast(); - if (entry->user_reachable()) continue; - entry->set_user_reachable(); - Vector children = entry->children(); - for (int i = 0; i < children.length(); ++i) { - HeapEntry* child = children[i]->to(); - if (!child->user_reachable()) { - worklist.Add(child); - } - } - } -} - - -static bool IsRetainingEdge(HeapGraphEdge* edge) { - if (edge->type() == HeapGraphEdge::kShortcut) return false; - // The edge is not retaining if it goes from system domain - // (i.e. an object not reachable from window) to the user domain - // (i.e. a reachable object). 
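// [Illustrative sketch, editorial] Dropping painted_/user_reachable_/
// dominator_, the retainer bookkeeping, and retained_size_ is what shrinks
// the expected HeapEntry footprint above (40 -> 24 bytes on 32-bit,
// 48 -> 32 on 64-bit). A rough layout model; bit-field packing is
// compiler-dependent, so the assert encodes an assumption about common
// ABIs, not a guarantee:
#include <cstdint>

struct HeapEntrySketch {
  unsigned type_ : 4;
  int children_count_ : 28;  // shares one 32-bit word with type_
  int children_index_;
  int self_size_;
  uint32_t id_;              // SnapshotObjectId
  void* snapshot_;
  const char* name_;
};

static_assert(sizeof(HeapEntrySketch) == (sizeof(void*) == 8 ? 32 : 24),
              "matches the new kExpectedHeapEntrySize for this pointer size");

int main() { return 0; }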
- return edge->from()->user_reachable() - || !edge->to()->user_reachable(); -} - - -void HeapSnapshotGenerator::FillPostorderIndexes( - Vector* entries) { - snapshot_->ClearPaint(); - int current_entry = 0; - List nodes_to_visit; - HeapEntry* root = snapshot_->root(); - nodes_to_visit.Add(root); - snapshot_->root()->paint(); - while (!nodes_to_visit.is_empty()) { - HeapEntry* entry = nodes_to_visit.last(); - Vector children = entry->children(); - bool has_new_edges = false; - for (int i = 0; i < children.length(); ++i) { - if (entry != root && !IsRetainingEdge(children[i])) continue; - HeapEntry* child = children[i]->to(); - if (!child->painted()) { - nodes_to_visit.Add(child); - child->paint(); - has_new_edges = true; - } - } - if (!has_new_edges) { - entry->set_postorder_index(current_entry); - (*entries)[current_entry++] = entry; - nodes_to_visit.RemoveLast(); - } - } - ASSERT_EQ(current_entry, entries->length()); -} - - -static int Intersect(int i1, int i2, const Vector& dominators) { - int finger1 = i1, finger2 = i2; - while (finger1 != finger2) { - while (finger1 < finger2) finger1 = dominators[finger1]; - while (finger2 < finger1) finger2 = dominators[finger2]; - } - return finger1; -} - - -// The algorithm is based on the article: -// K. Cooper, T. Harvey and K. Kennedy "A Simple, Fast Dominance Algorithm" -// Softw. Pract. Exper. 4 (2001), pp. 1-10. -bool HeapSnapshotGenerator::BuildDominatorTree( - const Vector& entries, - Vector* dominators) { - if (entries.length() == 0) return true; - HeapEntry* root = snapshot_->root(); - const int entries_length = entries.length(), root_index = entries_length - 1; - for (int i = 0; i < root_index; ++i) (*dominators)[i] = HeapEntry::kNoEntry; - (*dominators)[root_index] = root_index; - - // The affected array is used to mark entries which dominators - // have to be racalculated because of changes in their retainers. - ScopedVector affected(entries_length); - for (int i = 0; i < affected.length(); ++i) affected[i] = false; - // Mark the root direct children as affected. - Vector children = entries[root_index]->children(); - for (int i = 0; i < children.length(); ++i) { - affected[children[i]->to()->postorder_index()] = true; - } - - bool changed = true; - while (changed) { - changed = false; - if (!ProgressReport(false)) return false; - for (int i = root_index - 1; i >= 0; --i) { - if (!affected[i]) continue; - affected[i] = false; - // If dominator of the entry has already been set to root, - // then it can't propagate any further. - if ((*dominators)[i] == root_index) continue; - int new_idom_index = HeapEntry::kNoEntry; - Vector rets = entries[i]->retainers(); - for (int j = 0; j < rets.length(); ++j) { - if (rets[j]->from() != root && !IsRetainingEdge(rets[j])) continue; - int ret_index = rets[j]->from()->postorder_index(); - if (dominators->at(ret_index) != HeapEntry::kNoEntry) { - new_idom_index = new_idom_index == HeapEntry::kNoEntry - ? ret_index - : Intersect(ret_index, new_idom_index, *dominators); - // If idom has already reached the root, it doesn't make sense - // to check other retainers. 
- if (new_idom_index == root_index) break; - } - } - if (new_idom_index != HeapEntry::kNoEntry - && dominators->at(i) != new_idom_index) { - (*dominators)[i] = new_idom_index; - changed = true; - Vector children = entries[i]->children(); - for (int j = 0; j < children.length(); ++j) { - affected[children[j]->to()->postorder_index()] = true; - } - } - } - } - return true; -} - - -bool HeapSnapshotGenerator::SetEntriesDominators() { - MarkUserReachableObjects(); - // This array is used for maintaining postorder of nodes. - ScopedVector ordered_entries(snapshot_->entries().length()); - FillPostorderIndexes(&ordered_entries); - ScopedVector dominators(ordered_entries.length()); - if (!BuildDominatorTree(ordered_entries, &dominators)) return false; - for (int i = 0; i < ordered_entries.length(); ++i) { - ASSERT(dominators[i] != HeapEntry::kNoEntry); - ordered_entries[i]->set_dominator(ordered_entries[dominators[i]]); - } - return true; -} - - -bool HeapSnapshotGenerator::CalculateRetainedSizes() { - // As for the dominators tree we only know parent nodes, not - // children, to sum up total sizes we "bubble" node's self size - // adding it to all of its parents. - List& entries = snapshot_->entries(); - for (int i = 0; i < entries.length(); ++i) { - HeapEntry* entry = &entries[i]; - entry->set_retained_size(entry->self_size()); - } - for (int i = 0; i < entries.length(); ++i) { - int entry_size = entries[i].self_size(); - HeapEntry* current = &entries[i]; - for (HeapEntry* dominator = current->dominator(); - dominator != current; - current = dominator, dominator = current->dominator()) { - ASSERT(current->dominator() != NULL); - dominator->add_retained_size(entry_size); - } - } - return true; -} - - template struct MaxDecimalDigitsIn; template<> struct MaxDecimalDigitsIn<4> { static const int kSigned = 11; @@ -3417,8 +3200,8 @@ class OutputStreamWriter { // type, name|index, to_node. const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3; -// type, name, id, self_size, retained_size, dominator, children_index. -const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 7; +// type, name, id, self_size, children_index. 
+const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5; void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) { ASSERT(writer_ == NULL); @@ -3458,8 +3241,7 @@ HeapSnapshot* HeapSnapshotJSONSerializer::CreateFakeSnapshot() { (snapshot_->RawSnapshotSize() + MB - 1) / MB); HeapEntry* message = result->AddEntry(HeapEntry::kString, text, 0, 4); result->root()->SetIndexedReference(HeapGraphEdge::kElement, 1, message); - result->FillChildrenAndRetainers(); - result->SetDominatorsToSelf(); + result->FillChildren(); return result; } @@ -3557,11 +3339,10 @@ void HeapSnapshotJSONSerializer::SerializeEdges(const List& nodes) { void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry, int edges_index) { - // The buffer needs space for 6 ints, 1 uint32_t, 7 commas, \n and \0 + // The buffer needs space for 5 uint32_t, 5 commas, \n and \0 static const int kBufferSize = - 6 * MaxDecimalDigitsIn::kSigned // NOLINT - + MaxDecimalDigitsIn::kUnsigned // NOLINT - + 7 + 1 + 1; + 5 * MaxDecimalDigitsIn::kUnsigned // NOLINT + + 5 + 1 + 1; EmbeddedVector buffer; int buffer_pos = 0; if (entry_index(entry) != 0) { @@ -3575,10 +3356,6 @@ void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry, buffer[buffer_pos++] = ','; buffer_pos = utoa(entry->self_size(), buffer, buffer_pos); buffer[buffer_pos++] = ','; - buffer_pos = utoa(entry->retained_size(), buffer, buffer_pos); - buffer[buffer_pos++] = ','; - buffer_pos = utoa(entry_index(entry->dominator()), buffer, buffer_pos); - buffer[buffer_pos++] = ','; buffer_pos = utoa(edges_index, buffer, buffer_pos); buffer[buffer_pos++] = '\n'; buffer[buffer_pos++] = '\0'; @@ -3606,17 +3383,15 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() { writer_->AddString(",\"meta\":"); // The object describing node serialization layout. // We use a set of macros to improve readability. 
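// [Illustrative sketch, editorial] The whitespace added to the JSON_* macros
// just below looks like a C++11-compatibility fix: without the space,
// "["s"]" lexes as a string literal with a user-defined-literal suffix `s`,
// which C++11 compilers reject or warn about; with the space, ordinary
// adjacent-literal concatenation applies. Minimal repro of the pattern
// (assumed rationale; the patch itself does not state one):
#define JSON_NAME_SK "name"
// const char* bad = "{"JSON_NAME_SK"}";    // C++11: ill-formed UDL suffix
const char* good   = "{" JSON_NAME_SK "}";  // fine: yields "{name}"

int main() { return good[1] == 'n' ? 0 : 1; }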
-#define JSON_A(s) "["s"]" -#define JSON_O(s) "{"s"}" -#define JSON_S(s) "\""s"\"" +#define JSON_A(s) "[" s "]" +#define JSON_O(s) "{" s "}" +#define JSON_S(s) "\"" s "\"" writer_->AddString(JSON_O( JSON_S("node_fields") ":" JSON_A( JSON_S("type") "," JSON_S("name") "," JSON_S("id") "," JSON_S("self_size") "," - JSON_S("retained_size") "," - JSON_S("dominator") "," JSON_S("edges_index")) "," JSON_S("node_types") ":" JSON_A( JSON_A( diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h index 92896c2..349226b 100644 --- a/deps/v8/src/profile-generator.h +++ b/deps/v8/src/profile-generator.h @@ -529,35 +529,14 @@ class HeapEntry BASE_EMBEDDED { void set_name(const char* name) { name_ = name; } inline SnapshotObjectId id() { return id_; } int self_size() { return self_size_; } - int retained_size() { return retained_size_; } - void add_retained_size(int size) { retained_size_ += size; } - void set_retained_size(int size) { retained_size_ = size; } INLINE(int index() const); - int postorder_index() { return postorder_index_; } - void set_postorder_index(int value) { postorder_index_ = value; } int children_count() const { return children_count_; } INLINE(int set_children_index(int index)); - INLINE(int set_retainers_index(int index)); void add_child(HeapGraphEdge* edge) { children_arr()[children_count_++] = edge; } - void add_retainer(HeapGraphEdge* edge) { - retainers_arr()[retainers_count_++] = edge; - } Vector children() { return Vector(children_arr(), children_count_); } - Vector retainers() { - return Vector(retainers_arr(), retainers_count_); } - INLINE(HeapEntry* dominator() const); - void set_dominator(HeapEntry* entry) { - ASSERT(entry != NULL); - dominator_ = entry->index(); - } - void clear_paint() { painted_ = false; } - bool painted() { return painted_; } - void paint() { painted_ = true; } - bool user_reachable() { return user_reachable_; } - void set_user_reachable() { user_reachable_ = true; } void SetIndexedReference( HeapGraphEdge::Type type, int index, HeapEntry* entry); @@ -571,22 +550,12 @@ class HeapEntry BASE_EMBEDDED { private: INLINE(HeapGraphEdge** children_arr()); - INLINE(HeapGraphEdge** retainers_arr()); const char* TypeAsString(); - unsigned painted_: 1; - unsigned user_reachable_: 1; - int dominator_: 30; unsigned type_: 4; - int retainers_count_: 28; - int retainers_index_; - int children_count_; + int children_count_: 28; int children_index_; int self_size_; - union { - int postorder_index_; // Used during dominator tree building. - int retained_size_; // At that moment, there is no retained size yet. 
- }; SnapshotObjectId id_; HeapSnapshot* snapshot_; const char* name_; @@ -626,7 +595,6 @@ class HeapSnapshot { List& entries() { return entries_; } List& edges() { return edges_; } List& children() { return children_; } - List& retainers() { return retainers_; } void RememberLastJSObjectId(); SnapshotObjectId max_snapshot_js_object_id() const { return max_snapshot_js_object_id_; @@ -640,11 +608,9 @@ class HeapSnapshot { HeapEntry* AddGcRootsEntry(); HeapEntry* AddGcSubrootEntry(int tag); HeapEntry* AddNativesRootEntry(); - void ClearPaint(); HeapEntry* GetEntryById(SnapshotObjectId id); List* GetSortedEntriesList(); - void SetDominatorsToSelf(); - void FillChildrenAndRetainers(); + void FillChildren(); void Print(int max_depth); void PrintEntriesSize(); @@ -661,7 +627,6 @@ class HeapSnapshot { List entries_; List edges_; List children_; - List retainers_; List sorted_entries_; SnapshotObjectId max_snapshot_js_object_id_; @@ -1061,16 +1026,9 @@ class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface { bool GenerateSnapshot(); private: - bool BuildDominatorTree(const Vector& entries, - Vector* dominators); - bool CalculateRetainedSizes(); bool FillReferences(); - void FillPostorderIndexes(Vector* entries); - bool IsUserGlobalReference(const HeapGraphEdge* edge); - void MarkUserReachableObjects(); void ProgressStep(); bool ProgressReport(bool force = false); - bool SetEntriesDominators(); void SetProgressTotal(int iterations_count); HeapSnapshot* snapshot_; diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp-macro-assembler-irregexp.cc index aa67919..363b1ab 100644 --- a/deps/v8/src/regexp-macro-assembler-irregexp.cc +++ b/deps/v8/src/regexp-macro-assembler-irregexp.cc @@ -203,8 +203,9 @@ void RegExpMacroAssemblerIrregexp::PushBacktrack(Label* l) { } -void RegExpMacroAssemblerIrregexp::Succeed() { +bool RegExpMacroAssemblerIrregexp::Succeed() { Emit(BC_SUCCEED, 0); + return false; // Restart matching for global regexp not supported. } diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.h b/deps/v8/src/regexp-macro-assembler-irregexp.h index 25cb68d..d64a3d8 100644 --- a/deps/v8/src/regexp-macro-assembler-irregexp.h +++ b/deps/v8/src/regexp-macro-assembler-irregexp.h @@ -1,4 +1,4 @@ -// Copyright 2008-2009 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -59,7 +59,7 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler { virtual void Backtrack(); virtual void GoTo(Label* label); virtual void PushBacktrack(Label* label); - virtual void Succeed(); + virtual bool Succeed(); virtual void Fail(); virtual void PopRegister(int register_index); virtual void PushRegister(int register_index, diff --git a/deps/v8/src/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp-macro-assembler-tracer.cc index b7aeac4..c45fd44 100644 --- a/deps/v8/src/regexp-macro-assembler-tracer.cc +++ b/deps/v8/src/regexp-macro-assembler-tracer.cc @@ -1,4 +1,4 @@ -// Copyright 2008 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -102,14 +102,15 @@ void RegExpMacroAssemblerTracer::PushBacktrack(Label* label) { } -void RegExpMacroAssemblerTracer::Succeed() { - PrintF(" Succeed();\n"); - assembler_->Succeed(); +bool RegExpMacroAssemblerTracer::Succeed() { + bool restart = assembler_->Succeed(); + PrintF(" Succeed();%s\n", restart ? " [restart for global match]" : ""); + return restart; } void RegExpMacroAssemblerTracer::Fail() { - PrintF(" Fail();\n"); + PrintF(" Fail();"); assembler_->Fail(); } diff --git a/deps/v8/src/regexp-macro-assembler-tracer.h b/deps/v8/src/regexp-macro-assembler-tracer.h index 3fd4d8b..a915835 100644 --- a/deps/v8/src/regexp-macro-assembler-tracer.h +++ b/deps/v8/src/regexp-macro-assembler-tracer.h @@ -98,7 +98,7 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler { virtual void ReadStackPointerFromRegister(int reg); virtual void SetCurrentPositionFromEnd(int by); virtual void SetRegister(int register_index, int to); - virtual void Succeed(); + virtual bool Succeed(); virtual void WriteCurrentPositionToRegister(int reg, int cp_offset); virtual void ClearRegisters(int reg_from, int reg_to); virtual void WriteStackPointerToRegister(int reg); diff --git a/deps/v8/src/regexp-macro-assembler.cc b/deps/v8/src/regexp-macro-assembler.cc index b6fb3c5..08568de 100644 --- a/deps/v8/src/regexp-macro-assembler.cc +++ b/deps/v8/src/regexp-macro-assembler.cc @@ -1,4 +1,4 @@ -// Copyright 2008 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -35,7 +35,9 @@ namespace v8 { namespace internal { -RegExpMacroAssembler::RegExpMacroAssembler() : slow_safe_compiler_(false) { +RegExpMacroAssembler::RegExpMacroAssembler() + : slow_safe_compiler_(false), + global_(false) { } @@ -149,6 +151,7 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match( input_start, input_end, offsets_vector, + offsets_vector_length, isolate); return res; } @@ -161,6 +164,7 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute( const byte* input_start, const byte* input_end, int* output, + int output_size, Isolate* isolate) { ASSERT(isolate == Isolate::Current()); // Ensure that the minimum stack has been allocated. @@ -174,10 +178,10 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute( input_start, input_end, output, + output_size, stack_base, direct_call, isolate); - ASSERT(result <= SUCCESS); ASSERT(result >= RETRY); if (result == EXCEPTION && !isolate->has_pending_exception()) { diff --git a/deps/v8/src/regexp-macro-assembler.h b/deps/v8/src/regexp-macro-assembler.h index 8587435..5b2cf4a 100644 --- a/deps/v8/src/regexp-macro-assembler.h +++ b/deps/v8/src/regexp-macro-assembler.h @@ -1,4 +1,4 @@ -// Copyright 2008 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -174,7 +174,8 @@ class RegExpMacroAssembler { virtual void ReadStackPointerFromRegister(int reg) = 0; virtual void SetCurrentPositionFromEnd(int by) = 0; virtual void SetRegister(int register_index, int to) = 0; - virtual void Succeed() = 0; + // Return whether the matching (with a global regexp) will be restarted. + virtual bool Succeed() = 0; virtual void WriteCurrentPositionToRegister(int reg, int cp_offset) = 0; virtual void ClearRegisters(int reg_from, int reg_to) = 0; virtual void WriteStackPointerToRegister(int reg) = 0; @@ -183,8 +184,14 @@ class RegExpMacroAssembler { void set_slow_safe(bool ssc) { slow_safe_compiler_ = ssc; } bool slow_safe() { return slow_safe_compiler_; } + // Set whether the regular expression has the global flag. Exiting due to + // a failure in a global regexp may still mean success overall. + void set_global(bool global) { global_ = global; } + bool global() { return global_; } + private: bool slow_safe_compiler_; + bool global_; }; @@ -249,6 +256,7 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler { const byte* input_start, const byte* input_end, int* output, + int output_size, Isolate* isolate); }; diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js index a574f62..3809039 100644 --- a/deps/v8/src/regexp.js +++ b/deps/v8/src/regexp.js @@ -278,6 +278,10 @@ function TrimRegExp(regexp) { function RegExpToString() { + if (!IS_REGEXP(this)) { + throw MakeTypeError('incompatible_method_receiver', + ['RegExp.prototype.toString', this]); + } var result = '/' + this.source + '/'; if (this.global) result += 'g'; if (this.ignoreCase) result += 'i'; @@ -423,6 +427,7 @@ function SetUpRegExp() { LAST_INPUT(lastMatchInfo) = ToString(string); }; + %OptimizeObjectForAddingMultipleProperties($RegExp, 22); %DefineOrRedefineAccessorProperty($RegExp, 'input', RegExpGetInput, RegExpSetInput, DONT_DELETE); %DefineOrRedefineAccessorProperty($RegExp, '$_', RegExpGetInput, @@ -477,6 +482,7 @@ function SetUpRegExp() { RegExpMakeCaptureGetter(i), NoOpSetter, DONT_DELETE); } + %ToFastProperties($RegExp); } SetUpRegExp(); diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc index 0b80eff..d18a158 100644 --- a/deps/v8/src/runtime.cc +++ b/deps/v8/src/runtime.cc @@ -208,8 +208,10 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate, // Pixel elements cannot be created using an object literal. 
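The runtime changes that follow hinge on the new ElementsKind lattice introduced by this upgrade: the old FAST_SMI_ONLY_ELEMENTS/FAST_ELEMENTS pair is split into packed and holey variants, and call sites switch from equality checks to predicate helpers. The real definitions live in the newly added elements-kind.h; the sketch below only mirrors the predicates used in this section, and the numeric ordering is an assumption:

enum ElementsKind {
  FAST_SMI_ELEMENTS,           // packed smis
  FAST_HOLEY_SMI_ELEMENTS,     // smis, may contain holes
  FAST_ELEMENTS,               // packed tagged values
  FAST_HOLEY_ELEMENTS,         // tagged values, may contain holes
  FAST_DOUBLE_ELEMENTS,        // packed unboxed doubles
  FAST_HOLEY_DOUBLE_ELEMENTS,  // unboxed doubles, may contain holes
  DICTIONARY_ELEMENTS          // slow mode
};

inline bool IsFastElementsKind(ElementsKind k) {
  return k <= FAST_HOLEY_DOUBLE_ELEMENTS;
}
inline bool IsFastHoleyElementsKind(ElementsKind k) {
  return k == FAST_HOLEY_SMI_ELEMENTS || k == FAST_HOLEY_ELEMENTS ||
         k == FAST_HOLEY_DOUBLE_ELEMENTS;
}
inline bool IsFastSmiElementsKind(ElementsKind k) {
  return k == FAST_SMI_ELEMENTS || k == FAST_HOLEY_SMI_ELEMENTS;
}
inline bool IsFastObjectElementsKind(ElementsKind k) {
  return k == FAST_ELEMENTS || k == FAST_HOLEY_ELEMENTS;
}
inline bool IsFastDoubleElementsKind(ElementsKind k) {
  return k == FAST_DOUBLE_ELEMENTS || k == FAST_HOLEY_DOUBLE_ELEMENTS;
}
inline bool IsFastSmiOrObjectElementsKind(ElementsKind k) {
  return IsFastSmiElementsKind(k) || IsFastObjectElementsKind(k);
}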
ASSERT(!copy->HasExternalArrayElements()); switch (copy->GetElementsKind()) { - case FAST_SMI_ONLY_ELEMENTS: - case FAST_ELEMENTS: { + case FAST_SMI_ELEMENTS: + case FAST_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: { FixedArray* elements = FixedArray::cast(copy->elements()); if (elements->map() == heap->fixed_cow_array_map()) { isolate->counters()->cow_arrays_created_runtime()->Increment(); @@ -223,7 +225,7 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate, Object* value = elements->get(i); ASSERT(value->IsSmi() || value->IsTheHole() || - (copy->GetElementsKind() == FAST_ELEMENTS)); + (IsFastObjectElementsKind(copy->GetElementsKind()))); if (value->IsJSObject()) { JSObject* js_object = JSObject::cast(value); { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate, @@ -268,6 +270,7 @@ MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate, case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: // No contained objects, nothing to do. break; } @@ -452,7 +455,7 @@ MaybeObject* TransitionElements(Handle object, } -static const int kSmiOnlyLiteralMinimumLength = 1024; +static const int kSmiLiteralMinimumLength = 1024; Handle Runtime::CreateArrayLiteralBoilerplate( @@ -470,23 +473,22 @@ Handle Runtime::CreateArrayLiteralBoilerplate( Handle constant_elements_values( FixedArrayBase::cast(elements->get(1))); + ASSERT(IsFastElementsKind(constant_elements_kind)); Context* global_context = isolate->context()->global_context(); - if (constant_elements_kind == FAST_SMI_ONLY_ELEMENTS) { - object->set_map(Map::cast(global_context->smi_js_array_map())); - } else if (constant_elements_kind == FAST_DOUBLE_ELEMENTS) { - object->set_map(Map::cast(global_context->double_js_array_map())); - } else { - object->set_map(Map::cast(global_context->object_js_array_map())); - } + Object* maybe_maps_array = global_context->js_array_maps(); + ASSERT(!maybe_maps_array->IsUndefined()); + Object* maybe_map = FixedArray::cast(maybe_maps_array)->get( + constant_elements_kind); + ASSERT(maybe_map->IsMap()); + object->set_map(Map::cast(maybe_map)); Handle copied_elements_values; - if (constant_elements_kind == FAST_DOUBLE_ELEMENTS) { + if (IsFastDoubleElementsKind(constant_elements_kind)) { ASSERT(FLAG_smi_only_arrays); copied_elements_values = isolate->factory()->CopyFixedDoubleArray( Handle::cast(constant_elements_values)); } else { - ASSERT(constant_elements_kind == FAST_SMI_ONLY_ELEMENTS || - constant_elements_kind == FAST_ELEMENTS); + ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind)); const bool is_cow = (constant_elements_values->map() == isolate->heap()->fixed_cow_array_map()); @@ -522,15 +524,22 @@ Handle Runtime::CreateArrayLiteralBoilerplate( object->set_elements(*copied_elements_values); object->set_length(Smi::FromInt(copied_elements_values->length())); - // Ensure that the boilerplate object has FAST_ELEMENTS, unless the flag is + // Ensure that the boilerplate object has FAST_*_ELEMENTS, unless the flag is // on or the object is larger than the threshold. 
if (!FLAG_smi_only_arrays && - constant_elements_values->length() < kSmiOnlyLiteralMinimumLength) { - if (object->GetElementsKind() != FAST_ELEMENTS) { - CHECK(!TransitionElements(object, FAST_ELEMENTS, isolate)->IsFailure()); + constant_elements_values->length() < kSmiLiteralMinimumLength) { + ElementsKind elements_kind = object->GetElementsKind(); + if (!IsFastObjectElementsKind(elements_kind)) { + if (IsFastHoleyElementsKind(elements_kind)) { + CHECK(!TransitionElements(object, FAST_HOLEY_ELEMENTS, + isolate)->IsFailure()); + } else { + CHECK(!TransitionElements(object, FAST_ELEMENTS, isolate)->IsFailure()); + } } } + object->ValidateElements(); return object; } @@ -1730,7 +1739,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) { // length of a string, i.e. it is always a Smi. We check anyway for security. CONVERT_SMI_ARG_CHECKED(index, 2); CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3); - RUNTIME_ASSERT(last_match_info->HasFastElements()); + RUNTIME_ASSERT(last_match_info->HasFastObjectElements()); RUNTIME_ASSERT(index >= 0); RUNTIME_ASSERT(index <= subject->length()); isolate->counters()->regexp_entry_runtime()->Increment(); @@ -3104,7 +3113,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString( const int parts_added_per_loop = 2 * (compiled_replacement.parts() + 2); bool matched = true; do { - ASSERT(last_match_info_handle->HasFastElements()); + ASSERT(last_match_info_handle->HasFastObjectElements()); // Increase the capacity of the builder before entering local handle-scope, // so its internal buffer can safely allocate a new handle if it grows. builder.EnsureCapacity(parts_added_per_loop); @@ -3201,7 +3210,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString( if (match.is_null()) return Failure::Exception(); if (match->IsNull()) return *subject_handle; - ASSERT(last_match_info_handle->HasFastElements()); + ASSERT(last_match_info_handle->HasFastObjectElements()); int start, end; { @@ -3275,7 +3284,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString( if (match.is_null()) return Failure::Exception(); if (match->IsNull()) break; - ASSERT(last_match_info_handle->HasFastElements()); + ASSERT(last_match_info_handle->HasFastObjectElements()); HandleScope loop_scope(isolate); { AssertNoAllocation match_info_array_is_not_in_a_handle; @@ -3345,7 +3354,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceRegExpWithString) { CONVERT_ARG_CHECKED(JSRegExp, regexp, 1); CONVERT_ARG_CHECKED(JSArray, last_match_info, 3); - ASSERT(last_match_info->HasFastElements()); + ASSERT(last_match_info->HasFastObjectElements()); if (replacement->length() == 0) { if (subject->HasOnlyAsciiChars()) { @@ -3795,62 +3804,73 @@ static bool SearchStringMultiple(Isolate* isolate, } -static RegExpImpl::IrregexpResult SearchRegExpNoCaptureMultiple( +static int SearchRegExpNoCaptureMultiple( Isolate* isolate, Handle subject, Handle regexp, Handle last_match_array, FixedArrayBuilder* builder) { ASSERT(subject->IsFlat()); + ASSERT(regexp->CaptureCount() == 0); int match_start = -1; int match_end = 0; int pos = 0; - int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject); - if (required_registers < 0) return RegExpImpl::RE_EXCEPTION; - - OffsetsVector registers(required_registers, isolate); + int registers_per_match = RegExpImpl::IrregexpPrepare(regexp, subject); + if (registers_per_match < 0) return RegExpImpl::RE_EXCEPTION; + + int max_matches; + int num_registers = RegExpImpl::GlobalOffsetsVectorSize(regexp, + 
registers_per_match, + &max_matches); + OffsetsVector registers(num_registers, isolate); Vector register_vector(registers.vector(), registers.length()); int subject_length = subject->length(); bool first = true; - for (;;) { // Break on failure, return on exception. - RegExpImpl::IrregexpResult result = - RegExpImpl::IrregexpExecOnce(regexp, - subject, - pos, - register_vector); - if (result == RegExpImpl::RE_SUCCESS) { - match_start = register_vector[0]; - builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch); - if (match_end < match_start) { - ReplacementStringBuilder::AddSubjectSlice(builder, - match_end, - match_start); - } - match_end = register_vector[1]; - HandleScope loop_scope(isolate); - if (!first) { - builder->Add(*isolate->factory()->NewProperSubString(subject, - match_start, - match_end)); - } else { - builder->Add(*isolate->factory()->NewSubString(subject, - match_start, - match_end)); + int num_matches = RegExpImpl::IrregexpExecRaw(regexp, + subject, + pos, + register_vector); + if (num_matches > 0) { + for (int match_index = 0; match_index < num_matches; match_index++) { + int32_t* current_match = &register_vector[match_index * 2]; + match_start = current_match[0]; + builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch); + if (match_end < match_start) { + ReplacementStringBuilder::AddSubjectSlice(builder, + match_end, + match_start); + } + match_end = current_match[1]; + HandleScope loop_scope(isolate); + if (!first) { + builder->Add(*isolate->factory()->NewProperSubString(subject, + match_start, + match_end)); + } else { + builder->Add(*isolate->factory()->NewSubString(subject, + match_start, + match_end)); + first = false; + } } + + // If we did not get the maximum number of matches, we can stop here + // since there are no matches left. + if (num_matches < max_matches) break; + if (match_start != match_end) { pos = match_end; } else { pos = match_end + 1; if (pos > subject_length) break; } - } else if (result == RegExpImpl::RE_FAILURE) { + } else if (num_matches == 0) { break; } else { - ASSERT_EQ(result, RegExpImpl::RE_EXCEPTION); - return result; + ASSERT_EQ(num_matches, RegExpImpl::RE_EXCEPTION); + return RegExpImpl::RE_EXCEPTION; } - first = false; } if (match_start >= 0) { @@ -3872,7 +3892,7 @@ static RegExpImpl::IrregexpResult SearchRegExpNoCaptureMultiple( // Only called from Runtime_RegExpExecMultiple so it doesn't need to maintain // separate last match info. See comment on that function.
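The rewritten loop above consumes matches in batches: one IrregexpExecRaw call fills the register vector with up to max_matches (start, end) pairs and returns how many it found, and a short count means the subject is exhausted. A runnable toy model of that calling convention follows; FindAll is a hypothetical stand-in for the matcher, and the real loop's empty-match handling (bumping pos past a zero-length match) is elided:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Fills `registers` with up to max_matches (start, end) pairs and returns
// the number found, mimicking the batched IrregexpExecRaw protocol.
static int FindAll(const char* subject, char needle, int pos,
                   int32_t* registers, int max_matches) {
  int found = 0;
  int length = static_cast<int>(strlen(subject));
  for (; found < max_matches && pos < length; pos++) {
    if (subject[pos] == needle) {
      registers[2 * found] = pos;          // match start
      registers[2 * found + 1] = pos + 1;  // match end
      found++;
    }
  }
  return found;
}

int main() {
  const char* subject = "a-a--a-a";
  const int max_matches = 2;  // deliberately small to force several batches
  std::vector<int32_t> registers(2 * max_matches);
  int pos = 0;
  for (;;) {
    int n = FindAll(subject, 'a', pos, registers.data(), max_matches);
    for (int i = 0; i < n; i++) {
      printf("match at [%d,%d)\n", registers[2 * i], registers[2 * i + 1]);
    }
    if (n < max_matches) break;   // fewer than requested: no matches left
    pos = registers[2 * n - 1];   // resume after the last match end
  }
  return 0;
}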
-static RegExpImpl::IrregexpResult SearchRegExpMultiple( +static int SearchRegExpMultiple( Isolate* isolate, Handle subject, Handle regexp, @@ -3880,17 +3900,20 @@ static RegExpImpl::IrregexpResult SearchRegExpMultiple( FixedArrayBuilder* builder) { ASSERT(subject->IsFlat()); - int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject); - if (required_registers < 0) return RegExpImpl::RE_EXCEPTION; - - OffsetsVector registers(required_registers, isolate); + int registers_per_match = RegExpImpl::IrregexpPrepare(regexp, subject); + if (registers_per_match < 0) return RegExpImpl::RE_EXCEPTION; + + int max_matches; + int num_registers = RegExpImpl::GlobalOffsetsVectorSize(regexp, + registers_per_match, + &max_matches); + OffsetsVector registers(num_registers, isolate); Vector register_vector(registers.vector(), registers.length()); - RegExpImpl::IrregexpResult result = - RegExpImpl::IrregexpExecOnce(regexp, - subject, - 0, - register_vector); + int num_matches = RegExpImpl::IrregexpExecRaw(regexp, + subject, + 0, + register_vector); int capture_count = regexp->CaptureCount(); int subject_length = subject->length(); @@ -3899,60 +3922,71 @@ static RegExpImpl::IrregexpResult SearchRegExpMultiple( int pos = 0; // End of previous match. Differs from pos if match was empty. int match_end = 0; - if (result == RegExpImpl::RE_SUCCESS) { - bool first = true; + bool first = true; + + if (num_matches > 0) { do { - int match_start = register_vector[0]; - builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch); - if (match_end < match_start) { - ReplacementStringBuilder::AddSubjectSlice(builder, - match_end, - match_start); - } - match_end = register_vector[1]; - - { - // Avoid accumulating new handles inside loop. - HandleScope temp_scope(isolate); - // Arguments array to replace function is match, captures, index and - // subject, i.e., 3 + capture count in total. - Handle elements = - isolate->factory()->NewFixedArray(3 + capture_count); - Handle match; - if (!first) { - match = isolate->factory()->NewProperSubString(subject, - match_start, - match_end); - } else { - match = isolate->factory()->NewSubString(subject, - match_start, - match_end); + int match_start = 0; + for (int match_index = 0; match_index < num_matches; match_index++) { + int32_t* current_match = + &register_vector[match_index * registers_per_match]; + match_start = current_match[0]; + builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch); + if (match_end < match_start) { + ReplacementStringBuilder::AddSubjectSlice(builder, + match_end, + match_start); } + match_end = current_match[1]; + + { + // Avoid accumulating new handles inside loop. + HandleScope temp_scope(isolate); + // Arguments array to replace function is match, captures, index and + // subject, i.e., 3 + capture count in total.
+ Handle elements = + isolate->factory()->NewFixedArray(3 + capture_count); + Handle match; + if (!first) { + match = isolate->factory()->NewProperSubString(subject, + match_start, + match_end); + } else { + match = isolate->factory()->NewSubString(subject, + match_start, + match_end); + } + elements->set(0, *match); + for (int i = 1; i <= capture_count; i++) { + int start = current_match[i * 2]; + if (start >= 0) { + int end = current_match[i * 2 + 1]; + ASSERT(start <= end); + Handle substring; + if (!first) { + substring = + isolate->factory()->NewProperSubString(subject, start, end); + } else { + substring = + isolate->factory()->NewSubString(subject, start, end); + } + elements->set(i, *substring); } else { - substring = isolate->factory()->NewSubString(subject, start, end); + ASSERT(current_match[i * 2 + 1] < 0); + elements->set(i, isolate->heap()->undefined_value()); } - elements->set(i, *substring); - } else { - ASSERT(register_vector[i * 2 + 1] < 0); - elements->set(i, isolate->heap()->undefined_value()); } + elements->set(capture_count + 1, Smi::FromInt(match_start)); + elements->set(capture_count + 2, *subject); + builder->Add(*isolate->factory()->NewJSArrayWithElements(elements)); } - elements->set(capture_count + 1, Smi::FromInt(match_start)); - elements->set(capture_count + 2, *subject); - builder->Add(*isolate->factory()->NewJSArrayWithElements(elements)); + first = false; } + // If we did not get the maximum number of matches, we can stop here + // since there are no matches left. + if (num_matches < max_matches) break; + if (match_end > match_start) { pos = match_end; } else { @@ -3962,14 +3996,13 @@ static RegExpImpl::IrregexpResult SearchRegExpMultiple( } } - result = RegExpImpl::IrregexpExecOnce(regexp, - subject, - pos, - register_vector); - first = false; - } while (result == RegExpImpl::RE_SUCCESS); + num_matches = RegExpImpl::IrregexpExecRaw(regexp, + subject, + pos, + register_vector); + } while (num_matches > 0); - if (result != RegExpImpl::RE_EXCEPTION) { + if (num_matches != RegExpImpl::RE_EXCEPTION) { // Finished matching, with at least one match. if (match_end < subject_length) { ReplacementStringBuilder::AddSubjectSlice(builder, @@ -3993,7 +4026,7 @@ static RegExpImpl::IrregexpResult SearchRegExpMultiple( } } // No matches at all, return failure or exception result directly. - return result; + return num_matches; } @@ -4010,10 +4043,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) { CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 2); CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3); - ASSERT(last_match_info->HasFastElements()); + ASSERT(last_match_info->HasFastObjectElements()); ASSERT(regexp->GetFlags().is_global()); Handle result_elements; - if (result_array->HasFastElements()) { + if (result_array->HasFastObjectElements()) { result_elements = Handle(FixedArray::cast(result_array->elements())); } @@ -4035,7 +4068,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) { ASSERT_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP); - RegExpImpl::IrregexpResult result; + int result; if (regexp->CaptureCount() == 0) { result = SearchRegExpNoCaptureMultiple(isolate, subject, @@ -4315,17 +4348,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) { // JSObject without a string key. If the key is a Smi, check for a // definite out-of-bounds access to elements, which is a strong indicator // that subsequent accesses will also call the runtime. 
Proactively - // transition elements to FAST_ELEMENTS to avoid excessive boxing of + // transition elements to FAST_*_ELEMENTS to avoid excessive boxing of // doubles for those future calls in the case that the elements would // become FAST_DOUBLE_ELEMENTS. Handle js_object(args.at(0)); ElementsKind elements_kind = js_object->GetElementsKind(); - if (elements_kind == FAST_SMI_ONLY_ELEMENTS || - elements_kind == FAST_DOUBLE_ELEMENTS) { + if (IsFastElementsKind(elements_kind) && + !IsFastObjectElementsKind(elements_kind)) { FixedArrayBase* elements = js_object->elements(); if (args.at(1)->value() >= elements->length()) { + if (IsFastHoleyElementsKind(elements_kind)) { + elements_kind = FAST_HOLEY_ELEMENTS; + } else { + elements_kind = FAST_ELEMENTS; + } MaybeObject* maybe_object = TransitionElements(js_object, - FAST_ELEMENTS, + elements_kind, isolate); if (maybe_object->IsFailure()) return maybe_object; } @@ -4495,8 +4533,10 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate, return *value; } + js_object->ValidateElements(); Handle result = JSObject::SetElement( js_object, index, value, attr, strict_mode, set_mode); + js_object->ValidateElements(); if (result.is_null()) return Failure::Exception(); return *value; } @@ -4654,7 +4694,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsSmiToDouble) { NoHandleAllocation ha; RUNTIME_ASSERT(args.length() == 1); Handle object = args.at(0); - return TransitionElements(object, FAST_DOUBLE_ELEMENTS, isolate); + if (object->IsJSObject()) { + Handle js_object(Handle::cast(object)); + ElementsKind new_kind = js_object->HasFastHoleyElements() + ? FAST_HOLEY_DOUBLE_ELEMENTS + : FAST_DOUBLE_ELEMENTS; + return TransitionElements(object, new_kind, isolate); + } else { + return *object; + } } @@ -4662,7 +4710,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsDoubleToObject) { NoHandleAllocation ha; RUNTIME_ASSERT(args.length() == 1); Handle object = args.at(0); - return TransitionElements(object, FAST_ELEMENTS, isolate); + if (object->IsJSObject()) { + Handle js_object(Handle::cast(object)); + ElementsKind new_kind = js_object->HasFastHoleyElements() + ? FAST_HOLEY_ELEMENTS + : FAST_ELEMENTS; + return TransitionElements(object, new_kind, isolate); + } else { + return *object; + } } @@ -4693,32 +4749,38 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) { HandleScope scope; Object* raw_boilerplate_object = literals->get(literal_index); - Handle boilerplate(JSArray::cast(raw_boilerplate_object)); -#if DEBUG + Handle boilerplate_object(JSArray::cast(raw_boilerplate_object)); ElementsKind elements_kind = object->GetElementsKind(); -#endif - ASSERT(elements_kind <= FAST_DOUBLE_ELEMENTS); + ASSERT(IsFastElementsKind(elements_kind)); // Smis should never trigger transitions. ASSERT(!value->IsSmi()); if (value->IsNumber()) { - ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS); - JSObject::TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS); - if (IsMoreGeneralElementsKindTransition(boilerplate->GetElementsKind(), - FAST_DOUBLE_ELEMENTS)) { - JSObject::TransitionElementsKind(boilerplate, FAST_DOUBLE_ELEMENTS); - } - ASSERT(object->GetElementsKind() == FAST_DOUBLE_ELEMENTS); + ASSERT(IsFastSmiElementsKind(elements_kind)); + ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind) + ? 
FAST_HOLEY_DOUBLE_ELEMENTS + : FAST_DOUBLE_ELEMENTS; + if (IsMoreGeneralElementsKindTransition( + boilerplate_object->GetElementsKind(), + transitioned_kind)) { + JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind); + } + JSObject::TransitionElementsKind(object, transitioned_kind); + ASSERT(IsFastDoubleElementsKind(object->GetElementsKind())); FixedDoubleArray* double_array = FixedDoubleArray::cast(object->elements()); HeapNumber* number = HeapNumber::cast(*value); double_array->set(store_index, number->Number()); } else { - ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS || - elements_kind == FAST_DOUBLE_ELEMENTS); - JSObject::TransitionElementsKind(object, FAST_ELEMENTS); - if (IsMoreGeneralElementsKindTransition(boilerplate->GetElementsKind(), - FAST_ELEMENTS)) { - JSObject::TransitionElementsKind(boilerplate, FAST_ELEMENTS); + ASSERT(IsFastSmiElementsKind(elements_kind) || + IsFastDoubleElementsKind(elements_kind)); + ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind) + ? FAST_HOLEY_ELEMENTS + : FAST_ELEMENTS; + JSObject::TransitionElementsKind(object, transitioned_kind); + if (IsMoreGeneralElementsKindTransition( + boilerplate_object->GetElementsKind(), + transitioned_kind)) { + JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind); } FixedArray* object_array = FixedArray::cast(object->elements()); object_array->set(store_index, *value); @@ -5931,7 +5993,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) { ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSArray, array, 0); - if (!array->HasFastElements()) return isolate->heap()->undefined_value(); + if (!array->HasFastObjectElements()) { + return isolate->heap()->undefined_value(); + } FixedArray* elements = FixedArray::cast(array->elements()); int n = elements->length(); bool ascii = true; @@ -6374,7 +6438,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) { if (maybe_result->IsFailure()) return maybe_result; result->set_length(Smi::FromInt(part_count)); - ASSERT(result->HasFastElements()); + ASSERT(result->HasFastObjectElements()); if (part_count == 1 && indices.at(0) == subject_length) { FixedArray::cast(result->elements())->set(0, *subject); @@ -6393,7 +6457,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) { } if (limit == 0xffffffffu) { - if (result->HasFastElements()) { + if (result->HasFastObjectElements()) { StringSplitCache::Enter(isolate->heap(), isolate->heap()->string_split_cache(), *subject, @@ -6750,7 +6814,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) { if (maybe_result->IsFailure()) return maybe_result; int special_length = special->length(); - if (!array->HasFastElements()) { + if (!array->HasFastObjectElements()) { return isolate->Throw(isolate->heap()->illegal_argument_symbol()); } FixedArray* fixed_array = FixedArray::cast(array->elements()); @@ -6860,7 +6924,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) { int array_length = args.smi_at(1); CONVERT_ARG_CHECKED(String, separator, 2); - if (!array->HasFastElements()) { + if (!array->HasFastObjectElements()) { return isolate->Throw(isolate->heap()->illegal_argument_symbol()); } FixedArray* fixed_array = FixedArray::cast(array->elements()); @@ -6977,8 +7041,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) { NoHandleAllocation ha; ASSERT(args.length() == 3); CONVERT_ARG_CHECKED(JSArray, elements_array, 0); - RUNTIME_ASSERT(elements_array->HasFastElements() || - elements_array->HasFastSmiOnlyElements()); + 
RUNTIME_ASSERT(elements_array->HasFastSmiOrObjectElements()); CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]); CONVERT_ARG_CHECKED(String, separator, 2); // elements_array is fast-mode JSarray of alternating positions @@ -9139,7 +9202,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) { MaybeObject* maybe_result_array = output->EnsureCanContainHeapObjectElements(); if (maybe_result_array->IsFailure()) return maybe_result_array; - RUNTIME_ASSERT(output->HasFastElements()); + RUNTIME_ASSERT(output->HasFastObjectElements()); AssertNoAllocation no_allocation; @@ -9371,7 +9434,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) { ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(JSArray, array, 0); CONVERT_ARG_CHECKED(JSObject, element, 1); - RUNTIME_ASSERT(array->HasFastElements() || array->HasFastSmiOnlyElements()); + RUNTIME_ASSERT(array->HasFastSmiOrObjectElements()); int length = Smi::cast(array->length())->value(); FixedArray* elements = FixedArray::cast(array->elements()); for (int i = 0; i < length; i++) { @@ -9456,7 +9519,7 @@ class ArrayConcatVisitor { Handle map; if (fast_elements_) { map = isolate_->factory()->GetElementsTransitionMap(array, - FAST_ELEMENTS); + FAST_HOLEY_ELEMENTS); } else { map = isolate_->factory()->GetElementsTransitionMap(array, DICTIONARY_ELEMENTS); @@ -9515,8 +9578,10 @@ static uint32_t EstimateElementCount(Handle array) { uint32_t length = static_cast(array->length()->Number()); int element_count = 0; switch (array->GetElementsKind()) { - case FAST_SMI_ONLY_ELEMENTS: - case FAST_ELEMENTS: { + case FAST_SMI_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_ELEMENTS: + case FAST_HOLEY_ELEMENTS: { // Fast elements can't have lengths that are not representable by // a 32-bit signed integer. ASSERT(static_cast(FixedArray::kMaxLength) >= 0); @@ -9528,6 +9593,7 @@ static uint32_t EstimateElementCount(Handle array) { break; } case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: // TODO(1810): Decide if it's worthwhile to implement this. UNREACHABLE(); break; @@ -9618,8 +9684,10 @@ static void CollectElementIndices(Handle object, List* indices) { ElementsKind kind = object->GetElementsKind(); switch (kind) { - case FAST_SMI_ONLY_ELEMENTS: - case FAST_ELEMENTS: { + case FAST_SMI_ELEMENTS: + case FAST_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: { Handle elements(FixedArray::cast(object->elements())); uint32_t length = static_cast(elements->length()); if (range < length) length = range; @@ -9630,6 +9698,7 @@ static void CollectElementIndices(Handle object, } break; } + case FAST_HOLEY_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: { // TODO(1810): Decide if it's worthwhile to implement this. UNREACHABLE(); @@ -9744,8 +9813,10 @@ static bool IterateElements(Isolate* isolate, ArrayConcatVisitor* visitor) { uint32_t length = static_cast(receiver->length()->Number()); switch (receiver->GetElementsKind()) { - case FAST_SMI_ONLY_ELEMENTS: - case FAST_ELEMENTS: { + case FAST_SMI_ELEMENTS: + case FAST_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: { // Run through the elements FixedArray and use HasElement and GetElement // to check the prototype for missing elements. Handle elements(FixedArray::cast(receiver->elements())); @@ -9766,6 +9837,7 @@ static bool IterateElements(Isolate* isolate, } break; } + case FAST_HOLEY_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: { // TODO(1810): Decide if it's worthwhile to implement this. 
UNREACHABLE(); @@ -9863,7 +9935,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) { CONVERT_ARG_HANDLE_CHECKED(JSArray, arguments, 0); int argument_count = static_cast(arguments->length()->Number()); - RUNTIME_ASSERT(arguments->HasFastElements()); + RUNTIME_ASSERT(arguments->HasFastObjectElements()); Handle elements(FixedArray::cast(arguments->elements())); // Pass 1: estimate the length and number of elements of the result. @@ -9883,10 +9955,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) { Handle array(Handle::cast(obj)); // TODO(1810): Find out if it's worthwhile to properly support // arbitrary ElementsKinds. For now, pessimistically transition to - // FAST_ELEMENTS. + // FAST_*_ELEMENTS. if (array->HasFastDoubleElements()) { + ElementsKind to_kind = FAST_ELEMENTS; + if (array->HasFastHoleyElements()) { + to_kind = FAST_HOLEY_ELEMENTS; + } array = Handle::cast( - JSObject::TransitionElementsKind(array, FAST_ELEMENTS)); + JSObject::TransitionElementsKind(array, to_kind)); } length_estimate = static_cast(array->length()->Number()); @@ -9983,29 +10059,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) { ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(JSArray, from, 0); CONVERT_ARG_CHECKED(JSArray, to, 1); + from->ValidateElements(); + to->ValidateElements(); FixedArrayBase* new_elements = from->elements(); + ElementsKind from_kind = from->GetElementsKind(); MaybeObject* maybe_new_map; - ElementsKind elements_kind; - if (new_elements->map() == isolate->heap()->fixed_array_map() || - new_elements->map() == isolate->heap()->fixed_cow_array_map()) { - elements_kind = FAST_ELEMENTS; - } else if (new_elements->map() == - isolate->heap()->fixed_double_array_map()) { - elements_kind = FAST_DOUBLE_ELEMENTS; - } else { - elements_kind = DICTIONARY_ELEMENTS; - } - maybe_new_map = to->GetElementsTransitionMap(isolate, elements_kind); + maybe_new_map = to->GetElementsTransitionMap(isolate, from_kind); Object* new_map; if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map; - to->set_map(Map::cast(new_map)); - to->set_elements(new_elements); + to->set_map_and_elements(Map::cast(new_map), new_elements); to->set_length(from->length()); Object* obj; { MaybeObject* maybe_obj = from->ResetElements(); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } from->set_length(Smi::FromInt(0)); + to->ValidateElements(); return to; } @@ -10026,36 +10095,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_EstimateNumberOfElements) { } -RUNTIME_FUNCTION(MaybeObject*, Runtime_SwapElements) { - HandleScope handle_scope(isolate); - - ASSERT_EQ(3, args.length()); - - CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); - Handle key1 = args.at(1); - Handle key2 = args.at(2); - - uint32_t index1, index2; - if (!key1->ToArrayIndex(&index1) - || !key2->ToArrayIndex(&index2)) { - return isolate->ThrowIllegalOperation(); - } - - Handle jsobject = Handle::cast(object); - Handle tmp1 = Object::GetElement(jsobject, index1); - RETURN_IF_EMPTY_HANDLE(isolate, tmp1); - Handle tmp2 = Object::GetElement(jsobject, index2); - RETURN_IF_EMPTY_HANDLE(isolate, tmp2); - - RETURN_IF_EMPTY_HANDLE( - isolate, JSObject::SetElement(jsobject, index1, tmp2, NONE, kStrictMode)); - RETURN_IF_EMPTY_HANDLE( - isolate, JSObject::SetElement(jsobject, index2, tmp1, NONE, kStrictMode)); - - return isolate->heap()->undefined_value(); -} - - // Returns an array that tells you where in the [0, length) interval an array // might have elements. 
Can either return keys (positive integers) or // intervals (pair of a negative integer (-start-1) followed by a @@ -10085,8 +10124,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) { } return *isolate->factory()->NewJSArrayWithElements(keys); } else { - ASSERT(array->HasFastElements() || - array->HasFastSmiOnlyElements() || + ASSERT(array->HasFastSmiOrObjectElements() || array->HasFastDoubleElements()); Handle single_interval = isolate->factory()->NewFixedArray(2); // -1 means start of array. @@ -13401,9 +13439,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IS_VAR) { return isolate->heap()->ToBoolean(obj->Has##Name()); \ } -ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOnlyElements) -ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastElements) +ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiElements) +ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastObjectElements) +ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOrObjectElements) ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements) +ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastHoleyElements) ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements) ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalPixelElements) ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(ExternalArrayElements) diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h index a09d9cc..fc0a472 100644 --- a/deps/v8/src/runtime.h +++ b/deps/v8/src/runtime.h @@ -272,7 +272,6 @@ namespace internal { F(GetArrayKeys, 2, 1) \ F(MoveArrayContents, 2, 1) \ F(EstimateNumberOfElements, 1, 1) \ - F(SwapElements, 3, 1) \ \ /* Getters and Setters */ \ F(LookupAccessor, 3, 1) \ @@ -365,9 +364,11 @@ namespace internal { F(IS_VAR, 1, 1) \ \ /* expose boolean functions from objects-inl.h */ \ - F(HasFastSmiOnlyElements, 1, 1) \ - F(HasFastElements, 1, 1) \ + F(HasFastSmiElements, 1, 1) \ + F(HasFastSmiOrObjectElements, 1, 1) \ + F(HasFastObjectElements, 1, 1) \ F(HasFastDoubleElements, 1, 1) \ + F(HasFastHoleyElements, 1, 1) \ F(HasDictionaryElements, 1, 1) \ F(HasExternalPixelElements, 1, 1) \ F(HasExternalArrayElements, 1, 1) \ @@ -536,8 +537,7 @@ namespace internal { F(RegExpExec, 4, 1) \ F(RegExpConstructResult, 3, 1) \ F(GetFromCache, 2, 1) \ - F(NumberToString, 1, 1) \ - F(SwapElements, 3, 1) + F(NumberToString, 1, 1) //--------------------------------------------------------------------------- diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc index 6f6032a..2c61a75 100644 --- a/deps/v8/src/scopes.cc +++ b/deps/v8/src/scopes.cc @@ -658,6 +658,26 @@ bool Scope::HasTrivialOuterContext() const { } +bool Scope::AllowsLazyRecompilation() const { + return !force_eager_compilation_ && + !TrivialDeclarationScopesBeforeWithScope(); +} + + +bool Scope::TrivialDeclarationScopesBeforeWithScope() const { + Scope* outer = outer_scope_; + if (outer == NULL) return false; + outer = outer->DeclarationScope(); + while (outer != NULL) { + if (outer->is_with_scope()) return true; + if (outer->is_declaration_scope() && outer->num_heap_slots() > 0) + return false; + outer = outer->outer_scope_; + } + return false; +} + + int Scope::ContextChainLength(Scope* scope) { int n = 0; for (Scope* s = this; s != scope; s = s->outer_scope_) { diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h index e1a658a..be6705b 100644 --- a/deps/v8/src/scopes.h +++ b/deps/v8/src/scopes.h @@ -362,13 +362,16 @@ class Scope: public ZoneObject { bool AllowsLazyCompilation() const; // True if we can lazily recompile functions with this scope. 
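The scopes.cc change above makes lazy recompilation bail out when a with scope sits outside the function with only context-less declaration scopes in between, since such scopes vanish during scope info deserialization. A simplified, runnable model of the walk (Scope here is a toy struct, not V8's class, and the real code first normalizes to the enclosing declaration scope):

#include <cstdio>

// Toy stand-in for v8::internal::Scope, just enough to model the walk.
struct Scope {
  Scope* outer;
  bool is_with_scope;
  bool is_declaration_scope;
  int num_heap_slots;  // > 0 means the scope carries its own context
};

// Mirrors TrivialDeclarationScopesBeforeWithScope: a with scope found
// before any context-allocating declaration scope blocks lazy recompilation.
static bool TrivialDeclarationScopesBeforeWithScope(const Scope* scope) {
  for (const Scope* s = scope->outer; s != NULL; s = s->outer) {
    if (s->is_with_scope) return true;
    if (s->is_declaration_scope && s->num_heap_slots > 0) return false;
  }
  return false;
}

int main() {
  Scope global = { NULL, false, true, 8 };           // has its own context
  Scope with_scope = { &global, true, false, 0 };
  Scope inner_fn = { &with_scope, false, true, 0 };  // trivial, no context
  printf("%d\n", TrivialDeclarationScopesBeforeWithScope(&inner_fn));  // 1
  return 0;
}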
- bool allows_lazy_recompilation() const { - return !force_eager_compilation_; - } + bool AllowsLazyRecompilation() const; // True if the outer context of this scope is always the global context. bool HasTrivialOuterContext() const; + // True if this scope is inside a with scope and all declaration scopes + // between them have empty contexts. Such declaration scopes become + // invisible during scope info deserialization. + bool TrivialDeclarationScopesBeforeWithScope() const; + // The number of contexts between this and scope; zero if this == scope. int ContextChainLength(Scope* scope); diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc index 35f7be5..bf711ba 100644 --- a/deps/v8/src/string-stream.cc +++ b/deps/v8/src/string-stream.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -427,7 +427,7 @@ void StringStream::PrintMentionedObjectCache() { PrintUsingMap(JSObject::cast(printee)); if (printee->IsJSArray()) { JSArray* array = JSArray::cast(printee); - if (array->HasFastElements()) { + if (array->HasFastObjectElements()) { unsigned int limit = FixedArray::cast(array->elements())->length(); unsigned int length = static_cast(JSArray::cast(array)->length()->Number()); diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h index 94a5264..6db9c77 100644 --- a/deps/v8/src/v8-counters.h +++ b/deps/v8/src/v8-counters.h @@ -236,8 +236,6 @@ namespace internal { SC(math_sin, V8.MathSin) \ SC(math_sqrt, V8.MathSqrt) \ SC(math_tan, V8.MathTan) \ - SC(array_bounds_checks_seen, V8.ArrayBoundsChecksSeen) \ - SC(array_bounds_checks_removed, V8.ArrayBoundsChecksRemoved) \ SC(transcendental_cache_hit, V8.TranscendentalCacheHit) \ SC(transcendental_cache_miss, V8.TranscendentalCacheMiss) \ SC(stack_interrupts, V8.StackInterrupts) \ diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc index 0f263ee..534622e 100644 --- a/deps/v8/src/version.cc +++ b/deps/v8/src/version.cc @@ -34,7 +34,7 @@ // cannot be changed without changing the SCons build script. #define MAJOR_VERSION 3 #define MINOR_VERSION 11 -#define BUILD_NUMBER 1 +#define BUILD_NUMBER 7 #define PATCH_LEVEL 0 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc index 4e037ff..0af0a43 100644 --- a/deps/v8/src/x64/builtins-x64.cc +++ b/deps/v8/src/x64/builtins-x64.cc @@ -977,7 +977,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm, const int initial_capacity = JSArray::kPreallocatedArrayElements; STATIC_ASSERT(initial_capacity >= 0); - __ LoadInitialArrayMap(array_function, scratch2, scratch1); + __ LoadInitialArrayMap(array_function, scratch2, scratch1, false); // Allocate the JSArray object together with space for a fixed array with the // requested elements. @@ -1076,7 +1076,8 @@ static void AllocateJSArray(MacroAssembler* masm, Register scratch, bool fill_with_hole, Label* gc_required) { - __ LoadInitialArrayMap(array_function, scratch, elements_array); + __ LoadInitialArrayMap(array_function, scratch, + elements_array, fill_with_hole); if (FLAG_debug_code) { // Assert that array size is not zero. 
__ testq(array_size, array_size); @@ -1303,10 +1304,10 @@ static void ArrayNativeCode(MacroAssembler* masm, __ jmp(call_generic_code); __ bind(&not_double); - // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS. + // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS. // rbx: JSArray __ movq(r11, FieldOperand(rbx, HeapObject::kMapOffset)); - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, r11, kScratchRegister, diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc index d179d2a..61d6c87 100644 --- a/deps/v8/src/x64/code-stubs-x64.cc +++ b/deps/v8/src/x64/code-stubs-x64.cc @@ -2864,30 +2864,37 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ IncrementCounter(counters->regexp_entry_native(), 1); // Isolates: note we add an additional parameter here (isolate pointer). - static const int kRegExpExecuteArguments = 8; + static const int kRegExpExecuteArguments = 9; int argument_slots_on_stack = masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments); __ EnterApiExitFrame(argument_slots_on_stack); - // Argument 8: Pass current isolate address. + // Argument 9: Pass current isolate address. // __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize), // Immediate(ExternalReference::isolate_address())); __ LoadAddress(kScratchRegister, ExternalReference::isolate_address()); __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize), kScratchRegister); - // Argument 7: Indicate that this is a direct call from JavaScript. + // Argument 8: Indicate that this is a direct call from JavaScript. __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), Immediate(1)); - // Argument 6: Start (high end) of backtracking stack memory area. + // Argument 7: Start (high end) of backtracking stack memory area. __ movq(kScratchRegister, address_of_regexp_stack_memory_address); __ movq(r9, Operand(kScratchRegister, 0)); __ movq(kScratchRegister, address_of_regexp_stack_memory_size); __ addq(r9, Operand(kScratchRegister, 0)); - // Argument 6 passed in r9 on Linux and on the stack on Windows. -#ifdef _WIN64 __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9); + + // Argument 6: Set the number of capture registers to zero to force global + // regexps to behave as non-global. This does not affect non-global regexps. + // Argument 6 is passed in r9 on Linux and on the stack on Windows. +#ifdef _WIN64 + __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize), + Immediate(0)); +#else + __ Set(r9, 0); #endif // Argument 5: static offsets vector buffer. @@ -2895,7 +2902,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { ExternalReference::address_of_static_offsets_vector(isolate)); // Argument 5 passed in r8 on Linux and on the stack on Windows. #ifdef _WIN64 - __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize), r8); + __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kPointerSize), r8); #endif // First four arguments are passed in registers on both Linux and Windows. @@ -2960,7 +2967,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Check the result. Label success; Label exception; - __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS)); + __ cmpl(rax, Immediate(1)); + // We expect exactly one result since we force the called regexp to behave + // as non-global.
__ j(equal, &success, Label::kNear); __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION)); __ j(equal, &exception); @@ -5993,12 +6002,12 @@ struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { // KeyedStoreStubCompiler::GenerateStoreFastElement. { REG(rdi), REG(rbx), REG(rcx), EMIT_REMEMBERED_SET}, { REG(rdx), REG(rdi), REG(rbx), EMIT_REMEMBERED_SET}, - // ElementsTransitionGenerator::GenerateSmiOnlyToObject - // and ElementsTransitionGenerator::GenerateSmiOnlyToObject + // ElementsTransitionGenerator::GenerateMapChangeElementTransition + // and ElementsTransitionGenerator::GenerateSmiToDouble // and ElementsTransitionGenerator::GenerateDoubleToObject { REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET}, { REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET}, - // ElementsTransitionGenerator::GenerateSmiOnlyToDouble + // ElementsTransitionGenerator::GenerateSmiToDouble // and ElementsTransitionGenerator::GenerateDoubleToObject { REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET}, // ElementsTransitionGenerator::GenerateDoubleToObject @@ -6272,9 +6281,9 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { __ CheckFastElements(rdi, &double_elements); - // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS + // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS __ JumpIfSmi(rax, &smi_element); - __ CheckFastSmiOnlyElements(rdi, &fast_elements); + __ CheckFastSmiElements(rdi, &fast_elements); // Store into the array literal requires a elements transition. Call into // the runtime. @@ -6292,7 +6301,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { // place. __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); - // Array literal has ElementsKind of FAST_ELEMENTS and value is an object. + // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. __ bind(&fast_elements); __ SmiToInteger32(kScratchRegister, rcx); __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset)); @@ -6306,8 +6315,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { OMIT_SMI_CHECK); __ ret(0); - // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or - // FAST_ELEMENTS, and value is Smi. + // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or + // FAST_*_ELEMENTS, and value is Smi. 
__ bind(&smi_element); __ SmiToInteger32(kScratchRegister, rcx); __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset)); diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc index a8d39b2..2924810 100644 --- a/deps/v8/src/x64/codegen-x64.cc +++ b/deps/v8/src/x64/codegen-x64.cc @@ -220,7 +220,7 @@ ModuloFunction CreateModuloFunction() { #define __ ACCESS_MASM(masm) -void ElementsTransitionGenerator::GenerateSmiOnlyToObject( +void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( MacroAssembler* masm) { // ----------- S t a t e ------------- // -- rax : value @@ -241,7 +241,7 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToObject( } -void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( +void ElementsTransitionGenerator::GenerateSmiToDouble( MacroAssembler* masm, Label* fail) { // ----------- S t a t e ------------- // -- rax : value diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc index 94a50eb..1b29e58 100644 --- a/deps/v8/src/x64/debug-x64.cc +++ b/deps/v8/src/x64/debug-x64.cc @@ -91,7 +91,7 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() { rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength); } -const bool Debug::FramePaddingLayout::kIsSupported = false; +const bool Debug::FramePaddingLayout::kIsSupported = true; #define __ ACCESS_MASM(masm) @@ -105,6 +105,12 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, { FrameScope scope(masm, StackFrame::INTERNAL); + // Load padding words on stack. + for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) { + __ Push(Smi::FromInt(Debug::FramePaddingLayout::kPaddingValue)); + } + __ Push(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize)); + // Store the registers containing live values on the expression stack to // make sure that these are correctly updated during GC. Non object values // are stored as as two smis causing it to be untouched by GC. @@ -159,6 +165,11 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, } } + // Read current padding counter and skip corresponding number of words. + __ pop(kScratchRegister); + __ SmiToInteger32(kScratchRegister, kScratchRegister); + __ lea(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0)); + // Get rid of the internal frame. } diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc index 7ed81b4..0738153 100644 --- a/deps/v8/src/x64/disasm-x64.cc +++ b/deps/v8/src/x64/disasm-x64.cc @@ -1684,7 +1684,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector out_buffer, default: UNREACHABLE(); } - AppendToBuffer("test%c rax,0x%"V8_PTR_PREFIX"x", + AppendToBuffer("test%c rax,0x%" V8_PTR_PREFIX "x", operand_size_code(), value); break; diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc index a6c4c99..0db7424 100644 --- a/deps/v8/src/x64/full-codegen-x64.cc +++ b/deps/v8/src/x64/full-codegen-x64.cc @@ -659,7 +659,7 @@ void FullCodeGenerator::DoTest(Expression* condition, Label* fall_through) { ToBooleanStub stub(result_register()); __ push(result_register()); - __ CallStub(&stub); + __ CallStub(&stub, condition->test_id()); __ testq(result_register(), result_register()); // The stub returns nonzero for true. 
Split(not_zero, if_true, if_false, fall_through); @@ -1659,7 +1659,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { ASSERT_EQ(2, constant_elements->length()); ElementsKind constant_elements_kind = static_cast(Smi::cast(constant_elements->get(0))->value()); - bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS; + bool has_constant_fast_elements = + IsFastObjectElementsKind(constant_elements_kind); Handle constant_elements_values( FixedArrayBase::cast(constant_elements->get(1))); @@ -1670,7 +1671,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { Heap* heap = isolate()->heap(); if (has_constant_fast_elements && constant_elements_values->map() == heap->fixed_cow_array_map()) { - // If the elements are already FAST_ELEMENTS, the boilerplate cannot + // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot // change, so it's possible to specialize the stub in advance. __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1); FastCloneShallowArrayStub stub( @@ -1682,10 +1683,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3); } else { - ASSERT(constant_elements_kind == FAST_ELEMENTS || - constant_elements_kind == FAST_SMI_ONLY_ELEMENTS || + ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) || FLAG_smi_only_arrays); - // If the elements are already FAST_ELEMENTS, the boilerplate cannot + // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot // change, so it's possible to specialize the stub in advance. FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements ? FastCloneShallowArrayStub::CLONE_ELEMENTS @@ -1713,9 +1713,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { } VisitForAccumulatorValue(subexpr); - if (constant_elements_kind == FAST_ELEMENTS) { - // Fast-case array literal with ElementsKind of FAST_ELEMENTS, they cannot - // transition and don't need to call the runtime stub. + if (IsFastObjectElementsKind(constant_elements_kind)) { + // Fast-case array literal with ElementsKind of FAST_*_ELEMENTS, they + // cannot transition and don't need to call the runtime stub. int offset = FixedArray::kHeaderSize + (i * kPointerSize); __ movq(rbx, Operand(rsp, 0)); // Copy of array literal. __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset)); @@ -2287,7 +2287,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) { CallFunctionStub stub(arg_count, flags); __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize)); - __ CallStub(&stub); + __ CallStub(&stub, expr->id()); RecordJSReturnSite(expr); // Restore context register. __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); @@ -3360,102 +3360,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) { } -void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) { - ZoneList* args = expr->arguments(); - ASSERT(args->length() == 3); - VisitForStackValue(args->at(0)); - VisitForStackValue(args->at(1)); - VisitForStackValue(args->at(2)); - Label done; - Label slow_case; - Register object = rax; - Register index_1 = rbx; - Register index_2 = rcx; - Register elements = rdi; - Register temp = rdx; - __ movq(object, Operand(rsp, 2 * kPointerSize)); - // Fetch the map and check if array is in fast case. - // Check that object doesn't require security checks and - // has no indexed interceptor. 
- __ CmpObjectType(object, JS_ARRAY_TYPE, temp); - __ j(not_equal, &slow_case); - __ testb(FieldOperand(temp, Map::kBitFieldOffset), - Immediate(KeyedLoadIC::kSlowCaseBitFieldMask)); - __ j(not_zero, &slow_case); - - // Check the object's elements are in fast case and writable. - __ movq(elements, FieldOperand(object, JSObject::kElementsOffset)); - __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset), - Heap::kFixedArrayMapRootIndex); - __ j(not_equal, &slow_case); - - // Check that both indices are smis. - __ movq(index_1, Operand(rsp, 1 * kPointerSize)); - __ movq(index_2, Operand(rsp, 0 * kPointerSize)); - __ JumpIfNotBothSmi(index_1, index_2, &slow_case); - - // Check that both indices are valid. - // The JSArray length field is a smi since the array is in fast case mode. - __ movq(temp, FieldOperand(object, JSArray::kLengthOffset)); - __ SmiCompare(temp, index_1); - __ j(below_equal, &slow_case); - __ SmiCompare(temp, index_2); - __ j(below_equal, &slow_case); - - __ SmiToInteger32(index_1, index_1); - __ SmiToInteger32(index_2, index_2); - // Bring addresses into index1 and index2. - __ lea(index_1, FieldOperand(elements, index_1, times_pointer_size, - FixedArray::kHeaderSize)); - __ lea(index_2, FieldOperand(elements, index_2, times_pointer_size, - FixedArray::kHeaderSize)); - - // Swap elements. Use object and temp as scratch registers. - __ movq(object, Operand(index_1, 0)); - __ movq(temp, Operand(index_2, 0)); - __ movq(Operand(index_2, 0), object); - __ movq(Operand(index_1, 0), temp); - - Label no_remembered_set; - __ CheckPageFlag(elements, - temp, - 1 << MemoryChunk::SCAN_ON_SCAVENGE, - not_zero, - &no_remembered_set, - Label::kNear); - // Possible optimization: do a check that both values are Smis - // (or them and test against Smi mask.) - - // We are swapping two objects in an array and the incremental marker never - // pauses in the middle of scanning a single object. Therefore the - // incremental marker is not disturbed, so we don't need to call the - // RecordWrite stub that notifies the incremental marker. - __ RememberedSetHelper(elements, - index_1, - temp, - kDontSaveFPRegs, - MacroAssembler::kFallThroughAtEnd); - __ RememberedSetHelper(elements, - index_2, - temp, - kDontSaveFPRegs, - MacroAssembler::kFallThroughAtEnd); - - __ bind(&no_remembered_set); - - // We are done. Drop elements from the stack, and return undefined. - __ addq(rsp, Immediate(3 * kPointerSize)); - __ LoadRoot(rax, Heap::kUndefinedValueRootIndex); - __ jmp(&done); - - __ bind(&slow_case); - __ CallRuntime(Runtime::kSwapElements, 3); - - __ bind(&done); - context()->Plug(rax); -} - - void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { ZoneList* args = expr->arguments(); ASSERT_EQ(2, args->length()); diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc index 6ba5fb6..82fdb3c 100644 --- a/deps/v8/src/x64/ic-x64.cc +++ b/deps/v8/src/x64/ic-x64.cc @@ -769,25 +769,25 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex); __ j(not_equal, &non_double_value); - // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS -> + // Value is a double. Transition FAST_SMI_ELEMENTS -> // FAST_DOUBLE_ELEMENTS and complete the store. 
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, rbx, rdi, &slow); - ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, &slow); __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); __ jmp(&fast_double_without_map_check); __ bind(&non_double_value); - // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, rbx, rdi, &slow); - ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm); + ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm); __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); __ jmp(&finish_object_store); @@ -1642,7 +1642,7 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) { // Must return the modified receiver in eax. if (!FLAG_trace_elements_transitions) { Label fail; - ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail); __ movq(rax, rdx); __ Ret(); __ bind(&fail); diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc index 5f5c2af..f1c631b 100644 --- a/deps/v8/src/x64/lithium-codegen-x64.cc +++ b/deps/v8/src/x64/lithium-codegen-x64.cc @@ -2324,8 +2324,10 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) { __ movzxbq(temp, FieldOperand(temp, Map::kBitField2Offset)); __ and_(temp, Immediate(Map::kElementsKindMask)); __ shr(temp, Immediate(Map::kElementsKindShift)); - __ cmpl(temp, Immediate(FAST_ELEMENTS)); - __ j(equal, &ok, Label::kNear); + __ cmpl(temp, Immediate(GetInitialFastElementsKind())); + __ j(less, &fail, Label::kNear); + __ cmpl(temp, Immediate(TERMINAL_FAST_ELEMENTS_KIND)); + __ j(less_equal, &ok, Label::kNear); __ cmpl(temp, Immediate(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND)); __ j(less, &fail, Label::kNear); __ cmpl(temp, Immediate(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND)); @@ -2369,11 +2371,20 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { Register result = ToRegister(instr->result()); + if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) { + // Sign extend key because it could be a 32 bit negative value + // and the dehoisted address computation happens in 64 bits. + Register key_reg = ToRegister(instr->key()); + __ movsxlq(key_reg, key_reg); + } + // Load the result. __ movq(result, - BuildFastArrayOperand(instr->elements(), instr->key(), + BuildFastArrayOperand(instr->elements(), + instr->key(), FAST_ELEMENTS, - FixedArray::kHeaderSize - kHeapObjectTag)); + FixedArray::kHeaderSize - kHeapObjectTag, + instr->additional_index())); // Check for the hole value. 
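The movsxlq in the dehoisted-key paths above exists because the key register holds a 32-bit value that may be negative, while the address computation runs in 64 bits. A minimal standalone sketch of why zero extension would be wrong (illustrative values only, not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  // A dehoisted key can be a negative 32-bit value (the bounds check only
  // deopts later). Suppose key == -1:
  int32_t key = -1;
  // If the upper half of the 64-bit register were filled with zeroes
  // (zero extension), the index would turn into a huge positive number:
  uint64_t zero_extended = static_cast<uint32_t>(key);  // 0x00000000FFFFFFFF
  // movsxlq sign-extends instead, preserving the negative value:
  int64_t sign_extended = static_cast<int64_t>(key);    // -1
  assert(zero_extended == 0xFFFFFFFFull);
  assert(sign_extended == -1);
  // Only the sign-extended form keeps base + key * scale pointing below the
  // base address, matching the 64-bit dehoisted address computation.
  return 0;
}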
if (instr->hydrogen()->RequiresHoleCheck()) { @@ -2387,19 +2398,32 @@ void LCodeGen::DoLoadKeyedFastDoubleElement( LLoadKeyedFastDoubleElement* instr) { XMMRegister result(ToDoubleRegister(instr->result())); - int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + - sizeof(kHoleNanLower32); - Operand hole_check_operand = BuildFastArrayOperand( + if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) { + // Sign extend key because it could be a 32 bit negative value + // and the dehoisted address computation happens in 64 bits + Register key_reg = ToRegister(instr->key()); + __ movsxlq(key_reg, key_reg); + } + + if (instr->hydrogen()->RequiresHoleCheck()) { + int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + + sizeof(kHoleNanLower32); + Operand hole_check_operand = BuildFastArrayOperand( + instr->elements(), + instr->key(), + FAST_DOUBLE_ELEMENTS, + offset, + instr->additional_index()); + __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32)); + DeoptimizeIf(equal, instr->environment()); + } + + Operand double_load_operand = BuildFastArrayOperand( instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS, - offset); - __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32)); - DeoptimizeIf(equal, instr->environment()); - - Operand double_load_operand = BuildFastArrayOperand( - instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS, - FixedDoubleArray::kHeaderSize - kHeapObjectTag); + FixedDoubleArray::kHeaderSize - kHeapObjectTag, + instr->additional_index()); __ movsd(result, double_load_operand); } @@ -2408,7 +2432,8 @@ Operand LCodeGen::BuildFastArrayOperand( LOperand* elements_pointer, LOperand* key, ElementsKind elements_kind, - uint32_t offset) { + uint32_t offset, + uint32_t additional_index) { Register elements_pointer_reg = ToRegister(elements_pointer); int shift_size = ElementsKindToShiftSize(elements_kind); if (key->IsConstantOperand()) { @@ -2417,11 +2442,14 @@ Operand LCodeGen::BuildFastArrayOperand( Abort("array index constant value too big"); } return Operand(elements_pointer_reg, - constant_value * (1 << shift_size) + offset); + ((constant_value + additional_index) << shift_size) + + offset); } else { ScaleFactor scale_factor = static_cast(shift_size); - return Operand(elements_pointer_reg, ToRegister(key), - scale_factor, offset); + return Operand(elements_pointer_reg, + ToRegister(key), + scale_factor, + offset + (additional_index << shift_size)); } } @@ -2430,7 +2458,17 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( LLoadKeyedSpecializedArrayElement* instr) { ElementsKind elements_kind = instr->elements_kind(); Operand operand(BuildFastArrayOperand(instr->external_pointer(), - instr->key(), elements_kind, 0)); + instr->key(), + elements_kind, + 0, + instr->additional_index())); + if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) { + // Sign extend key because it could be a 32 bit negative value + // and the dehoisted address computation happens in 64 bits + Register key_reg = ToRegister(instr->key()); + __ movsxlq(key_reg, key_reg); + } + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { XMMRegister result(ToDoubleRegister(instr->result())); __ movss(result, operand); @@ -2467,8 +2505,11 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case 
DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3332,7 +3373,18 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( LStoreKeyedSpecializedArrayElement* instr) { ElementsKind elements_kind = instr->elements_kind(); Operand operand(BuildFastArrayOperand(instr->external_pointer(), - instr->key(), elements_kind, 0)); + instr->key(), + elements_kind, + 0, + instr->additional_index())); + + if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) { + // Sign extend key because it could be a 32 bit negative value + // and the dehoisted address computation happens in 64 bits + Register key_reg = ToRegister(instr->key()); + __ movsxlq(key_reg, key_reg); + } + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { XMMRegister value(ToDoubleRegister(instr->value())); __ cvtsd2ss(value, value); @@ -3358,8 +3410,11 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3402,30 +3457,29 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { Register elements = ToRegister(instr->object()); Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; - // Do the store. - if (instr->key()->IsConstantOperand()) { - ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - int offset = - ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize; - __ movq(FieldOperand(elements, offset), value); - } else { - __ movq(FieldOperand(elements, - key, - times_pointer_size, - FixedArray::kHeaderSize), - value); + Operand operand = + BuildFastArrayOperand(instr->object(), + instr->key(), + FAST_ELEMENTS, + FixedArray::kHeaderSize - kHeapObjectTag, + instr->additional_index()); + + if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) { + // Sign extend key because it could be a 32 bit negative value + // and the dehoisted address computation happens in 64 bits + Register key_reg = ToRegister(instr->key()); + __ movsxlq(key_reg, key_reg); } + __ movq(operand, value); + if (instr->hydrogen()->NeedsWriteBarrier()) { + ASSERT(!instr->key()->IsConstantOperand()); HType type = instr->hydrogen()->value()->type(); SmiCheck check_needed = type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; // Compute address of modified element and store it into key register. 
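For the constant-key case, BuildFastArrayOperand above folds the hoisted additional_index into the displacement as ((constant_key + additional_index) << shift_size) + offset; for the register case the same shifted term is added to the static offset. A tiny worked example of that arithmetic, under assumed x64 parameters (shift_size 3 for pointer-sized elements; the 8-byte header offset is illustrative, not the real FixedArray::kHeaderSize):

#include <cstdint>
#include <cstdio>

// Our restatement of the constant-key displacement formula; names are ours.
int64_t Displacement(int64_t constant_key, int64_t additional_index,
                     int shift_size, int64_t header_offset) {
  return ((constant_key + additional_index) << shift_size) + header_offset;
}

int main() {
  // Key 2 with a dehoisted additional_index of 4 and 8-byte elements lands
  // (2 + 4) * 8 + 8 = 56 bytes past the elements pointer.
  printf("%lld\n", static_cast<long long>(Displacement(2, 4, 3, 8)));
  return 0;
}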
- __ lea(key, FieldOperand(elements, - key, - times_pointer_size, - FixedArray::kHeaderSize)); + __ lea(key, operand); __ RecordWrite(elements, key, value, @@ -3454,8 +3508,19 @@ void LCodeGen::DoStoreKeyedFastDoubleElement( } Operand double_store_operand = BuildFastArrayOperand( - instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS, - FixedDoubleArray::kHeaderSize - kHeapObjectTag); + instr->elements(), + instr->key(), + FAST_DOUBLE_ELEMENTS, + FixedDoubleArray::kHeaderSize - kHeapObjectTag, + instr->additional_index()); + + if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) { + // Sign extend key because it could be a 32 bit negative value + // and the dehoisted address computation happens in 64 bits + Register key_reg = ToRegister(instr->key()); + __ movsxlq(key_reg, key_reg); + } + __ movsd(double_store_operand, value); } @@ -3484,21 +3549,22 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map); __ j(not_equal, ¬_applicable); __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT); - if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) { + if (IsSimpleMapChangeTransition(from_kind, to_kind)) { __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg); // Write barrier. ASSERT_NE(instr->temp_reg(), NULL); __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, ToRegister(instr->temp_reg()), kDontSaveFPRegs); - } else if (from_kind == FAST_SMI_ONLY_ELEMENTS && - to_kind == FAST_DOUBLE_ELEMENTS) { + } else if (IsFastSmiElementsKind(from_kind) && + IsFastDoubleElementsKind(to_kind)) { Register fixed_object_reg = ToRegister(instr->temp_reg()); ASSERT(fixed_object_reg.is(rdx)); ASSERT(new_map_reg.is(rbx)); __ movq(fixed_object_reg, object_reg); CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(), RelocInfo::CODE_TARGET, instr); - } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) { + } else if (IsFastDoubleElementsKind(from_kind) && + IsFastObjectElementsKind(to_kind)) { Register fixed_object_reg = ToRegister(instr->temp_reg()); ASSERT(fixed_object_reg.is(rdx)); ASSERT(new_map_reg.is(rbx)); @@ -4172,8 +4238,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { // Deopt if the array literal boilerplate ElementsKind is of a type different // than the expected one. The check isn't necessary if the boilerplate has - // already been converted to FAST_ELEMENTS. - if (boilerplate_elements_kind != FAST_ELEMENTS) { + // already been converted to TERMINAL_FAST_ELEMENTS_KIND. + if (CanTransitionToMoreGeneralFastElementsKind( + boilerplate_elements_kind, true)) { __ LoadHeapObject(rax, instr->hydrogen()->boilerplate_object()); __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset)); // Load the map's "bit field 2". @@ -4319,10 +4386,11 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) { ElementsKind boilerplate_elements_kind = instr->hydrogen()->boilerplate()->GetElementsKind(); - // Deopt if the literal boilerplate ElementsKind is of a type different than - // the expected one. The check isn't necessary if the boilerplate has already - // been converted to FAST_ELEMENTS. - if (boilerplate_elements_kind != FAST_ELEMENTS) { + // Deopt if the array literal boilerplate ElementsKind is of a type different + // than the expected one. The check isn't necessary if the boilerplate has + // already been converted to TERMINAL_FAST_ELEMENTS_KIND. 
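The three branches of DoTransitionElementsKind above follow a general rule: transitions that keep the element representation (smi to object, packed to holey) only need a map swap plus a write barrier, while smi-to-double and double-to-object must rewrite the backing store. A compact sketch of that dispatch, assuming simplified stand-ins for the kinds and predicates (not V8's actual definitions):

#include <cstdio>

enum Kind { SMI, HOLEY_SMI, DOUBLE, HOLEY_DOUBLE, OBJECT, HOLEY_OBJECT };

static bool IsSmiKind(Kind k)    { return k == SMI || k == HOLEY_SMI; }
static bool IsDoubleKind(Kind k) { return k == DOUBLE || k == HOLEY_DOUBLE; }
static bool IsObjectKind(Kind k) { return k == OBJECT || k == HOLEY_OBJECT; }

static const char* TransitionStrategy(Kind from, Kind to) {
  // Smis are valid tagged values, and packed -> holey keeps the element
  // representation, so these transitions only need a new map:
  if ((IsSmiKind(from) && IsObjectKind(to)) ||
      (from == SMI && to == HOLEY_SMI) ||
      (from == DOUBLE && to == HOLEY_DOUBLE) ||
      (from == OBJECT && to == HOLEY_OBJECT)) {
    return "simple map change";
  }
  // Changing the representation requires rewriting the backing store:
  if (IsSmiKind(from) && IsDoubleKind(to))
    return "rewrite store: smis to raw doubles";
  if (IsDoubleKind(from) && IsObjectKind(to))
    return "rewrite store: doubles boxed as heap numbers";
  return "not covered by this sketch";
}

int main() {
  printf("%s\n", TransitionStrategy(SMI, OBJECT));
  printf("%s\n", TransitionStrategy(SMI, DOUBLE));
  printf("%s\n", TransitionStrategy(DOUBLE, HOLEY_OBJECT));
  return 0;
}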
+ if (CanTransitionToMoreGeneralFastElementsKind( + boilerplate_elements_kind, true)) { __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate()); __ movq(rcx, FieldOperand(rbx, HeapObject::kMapOffset)); // Load the map's "bit field 2". diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h index 1331fba..73e1a9b 100644 --- a/deps/v8/src/x64/lithium-codegen-x64.h +++ b/deps/v8/src/x64/lithium-codegen-x64.h @@ -231,7 +231,8 @@ class LCodeGen BASE_EMBEDDED { LOperand* elements_pointer, LOperand* key, ElementsKind elements_kind, - uint32_t offset); + uint32_t offset, + uint32_t additional_index = 0); // Specific math operations - used from DoUnaryMathOperation. void EmitIntegerMathAbs(LUnaryMathOperation* instr); diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc index 3ba0cae..6094dbb 100644 --- a/deps/v8/src/x64/lithium-x64.cc +++ b/deps/v8/src/x64/lithium-x64.cc @@ -2012,8 +2012,9 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LInstruction* LChunkBuilder::DoTransitionElementsKind( HTransitionElementsKind* instr) { - if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS && - instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) { + ElementsKind from_kind = instr->original_map()->elements_kind(); + ElementsKind to_kind = instr->transitioned_map()->elements_kind(); + if (IsSimpleMapChangeTransition(from_kind, to_kind)) { LOperand* object = UseRegister(instr->object()); LOperand* new_map_reg = TempRegister(); LOperand* temp_reg = TempRegister(); diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h index 9083c1f..642a0a0 100644 --- a/deps/v8/src/x64/lithium-x64.h +++ b/deps/v8/src/x64/lithium-x64.h @@ -1199,6 +1199,7 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1215,13 +1216,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> { LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { public: - LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, - LOperand* key) { + LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) { inputs_[0] = external_pointer; inputs_[1] = key; } @@ -1235,6 +1236,7 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1692,6 +1694,7 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { LOperand* object() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1716,6 +1719,7 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> { LOperand* value() { return inputs_[2]; } bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1739,6 +1743,7 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> { ElementsKind elements_kind() const { 
return hydrogen()->elements_kind(); } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc index 3d380a2..95b43f4 100644 --- a/deps/v8/src/x64/macro-assembler-x64.cc +++ b/deps/v8/src/x64/macro-assembler-x64.cc @@ -2658,10 +2658,12 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) { void MacroAssembler::CheckFastElements(Register map, Label* fail, Label::Distance distance) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - STATIC_ASSERT(FAST_ELEMENTS == 1); + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + STATIC_ASSERT(FAST_ELEMENTS == 2); + STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); cmpb(FieldOperand(map, Map::kBitField2Offset), - Immediate(Map::kMaximumBitField2FastElementValue)); + Immediate(Map::kMaximumBitField2FastHoleyElementValue)); j(above, fail, distance); } @@ -2669,23 +2671,26 @@ void MacroAssembler::CheckFastElements(Register map, void MacroAssembler::CheckFastObjectElements(Register map, Label* fail, Label::Distance distance) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); - STATIC_ASSERT(FAST_ELEMENTS == 1); + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + STATIC_ASSERT(FAST_ELEMENTS == 2); + STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); cmpb(FieldOperand(map, Map::kBitField2Offset), - Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue)); + Immediate(Map::kMaximumBitField2FastHoleySmiElementValue)); j(below_equal, fail, distance); cmpb(FieldOperand(map, Map::kBitField2Offset), - Immediate(Map::kMaximumBitField2FastElementValue)); + Immediate(Map::kMaximumBitField2FastHoleyElementValue)); j(above, fail, distance); } -void MacroAssembler::CheckFastSmiOnlyElements(Register map, - Label* fail, - Label::Distance distance) { - STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0); +void MacroAssembler::CheckFastSmiElements(Register map, + Label* fail, + Label::Distance distance) { + STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); + STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); cmpb(FieldOperand(map, Map::kBitField2Offset), - Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue)); + Immediate(Map::kMaximumBitField2FastHoleySmiElementValue)); j(above, fail, distance); } @@ -2749,24 +2754,18 @@ void MacroAssembler::CompareMap(Register obj, CompareMapMode mode) { Cmp(FieldOperand(obj, HeapObject::kMapOffset), map); if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) { - Map* transitioned_fast_element_map( - map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL)); - ASSERT(transitioned_fast_element_map == NULL || - map->elements_kind() != FAST_ELEMENTS); - if (transitioned_fast_element_map != NULL) { - j(equal, early_success, Label::kNear); - Cmp(FieldOperand(obj, HeapObject::kMapOffset), - Handle(transitioned_fast_element_map)); - } - - Map* transitioned_double_map( - map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL)); - ASSERT(transitioned_double_map == NULL || - map->elements_kind() == FAST_SMI_ONLY_ELEMENTS); - if (transitioned_double_map != NULL) { - j(equal, early_success, Label::kNear); - Cmp(FieldOperand(obj, HeapObject::kMapOffset), - Handle(transitioned_double_map)); + ElementsKind kind = map->elements_kind(); + if (IsFastElementsKind(kind)) { + bool packed = IsFastPackedElementsKind(kind); + Map* current_map = *map; + while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) { + kind = GetNextMoreGeneralFastElementsKind(kind, packed); + current_map = 
current_map->LookupElementsTransitionMap(kind, NULL); + if (!current_map) break; + j(equal, early_success, Label::kNear); + Cmp(FieldOperand(obj, HeapObject::kMapOffset), + Handle(current_map)); + } } } } @@ -4057,27 +4056,38 @@ void MacroAssembler::LoadTransitionedArrayMapConditional( movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset)); // Check that the function's map is the same as the expected cached map. - int expected_index = - Context::GetContextMapIndexFromElementsKind(expected_kind); - cmpq(map_in_out, Operand(scratch, Context::SlotOffset(expected_index))); + movq(scratch, Operand(scratch, + Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); + + int offset = expected_kind * kPointerSize + + FixedArrayBase::kHeaderSize; + cmpq(map_in_out, FieldOperand(scratch, offset)); j(not_equal, no_map_match); // Use the transitioned cached map. - int trans_index = - Context::GetContextMapIndexFromElementsKind(transitioned_kind); - movq(map_in_out, Operand(scratch, Context::SlotOffset(trans_index))); + offset = transitioned_kind * kPointerSize + + FixedArrayBase::kHeaderSize; + movq(map_in_out, FieldOperand(scratch, offset)); } void MacroAssembler::LoadInitialArrayMap( - Register function_in, Register scratch, Register map_out) { + Register function_in, Register scratch, + Register map_out, bool can_have_holes) { ASSERT(!function_in.is(map_out)); Label done; movq(map_out, FieldOperand(function_in, JSFunction::kPrototypeOrInitialMapOffset)); if (!FLAG_smi_only_arrays) { - LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, - FAST_ELEMENTS, + ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS; + LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, + kind, + map_out, + scratch, + &done); + } else if (can_have_holes) { + LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, + FAST_HOLEY_SMI_ELEMENTS, map_out, scratch, &done); diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h index 66587d5..1c1cd95 100644 --- a/deps/v8/src/x64/macro-assembler-x64.h +++ b/deps/v8/src/x64/macro-assembler-x64.h @@ -877,9 +877,9 @@ class MacroAssembler: public Assembler { // Check if a map for a JSObject indicates that the object has fast smi only // elements. Jump to the specified label if it does not. - void CheckFastSmiOnlyElements(Register map, - Label* fail, - Label::Distance distance = Label::kFar); + void CheckFastSmiElements(Register map, + Label* fail, + Label::Distance distance = Label::kFar); // Check to see if maybe_number can be stored as a double in // FastDoubleElements. If it can, store it at the index specified by index in @@ -1141,7 +1141,8 @@ class MacroAssembler: public Assembler { // Load the initial map for new Arrays from a JSFunction. void LoadInitialArrayMap(Register function_in, Register scratch, - Register map_out); + Register map_out, + bool can_have_holes); // Load the global function with the given index. void LoadGlobalFunction(int index, Register function); diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc index bf232bf..cb1e029 100644 --- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc +++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -44,21 +44,23 @@ namespace internal { /* * This assembler uses the following register assignment convention - * - rdx : currently loaded character(s) as ASCII or UC16. Must be loaded using - * LoadCurrentCharacter before using any of the dispatch methods. - * - rdi : current position in input, as negative offset from end of string. + * - rdx : Currently loaded character(s) as ASCII or UC16. Must be loaded + * using LoadCurrentCharacter before using any of the dispatch methods. + * Temporarily stores the index of capture start after a matching pass + * for a global regexp. + * - rdi : Current position in input, as negative offset from end of string. * Please notice that this is the byte offset, not the character - * offset! Is always a 32-bit signed (negative) offset, but must be + * offset! Is always a 32-bit signed (negative) offset, but must be * maintained sign-extended to 64 bits, since it is used as index. - * - rsi : end of input (points to byte after last character in input), + * - rsi : End of input (points to byte after last character in input), * so that rsi+rdi points to the current character. - * - rbp : frame pointer. Used to access arguments, local variables and + * - rbp : Frame pointer. Used to access arguments, local variables and * RegExp registers. - * - rsp : points to tip of C stack. - * - rcx : points to tip of backtrack stack. The backtrack stack contains - * only 32-bit values. Most are offsets from some base (e.g., character + * - rsp : Points to tip of C stack. + * - rcx : Points to tip of backtrack stack. The backtrack stack contains + * only 32-bit values. Most are offsets from some base (e.g., character * positions from end of string or code location from Code* pointer). - * - r8 : code object pointer. Used to convert between absolute and + * - r8 : Code object pointer. Used to convert between absolute and * code-object-relative addresses. * * The registers rax, rbx, r9 and r11 are free to use for computations. @@ -72,20 +74,22 @@ namespace internal { * * The stack will have the following content, in some order, indexable from the * frame pointer (see, e.g., kStackHighEnd): - * - Isolate* isolate (Address of the current isolate) + * - Isolate* isolate (address of the current isolate) * - direct_call (if 1, direct call from JavaScript code, if 0 call * through the runtime system) - * - stack_area_base (High end of the memory area to use as + * - stack_area_base (high end of the memory area to use as * backtracking stack) + * - capture array size (may fit multiple sets of matches) * - int* capture_array (int[num_saved_registers_], for output). - * - end of input (Address of end of string) - * - start of input (Address of first character in string) + * - end of input (address of end of string) + * - start of input (address of first character in string) * - start index (character index of start) * - String* input_string (input string) * - return address * - backup of callee save registers (rbx, possibly rsi and rdi). + * - success counter (only useful for global regexp to count matches) * - Offset of location before start of input (effectively character - * position -1). Used to initialize capture registers to a non-position. + * position -1). Used to initialize capture registers to a non-position. 
* - At start of string (if 1, we are starting at the start of the * string, otherwise 0) * - register 0 rbp[-n] (Only positions must be stored in the first @@ -94,7 +98,7 @@ namespace internal { * * The first num_saved_registers_ registers are initialized to point to * "character -1" in the string (i.e., char_size() bytes before the first - * character of the string). The remaining registers starts out uninitialized. + * character of the string). The remaining registers starts out uninitialized. * * The first seven values must be provided by the calling code by * calling the code's entry address cast to a function pointer with the @@ -744,13 +748,16 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type, void RegExpMacroAssemblerX64::Fail() { - ASSERT(FAILURE == 0); // Return value for failure is zero. - __ Set(rax, 0); + STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero. + if (!global()) { + __ Set(rax, FAILURE); + } __ jmp(&exit_label_); } Handle RegExpMacroAssemblerX64::GetCode(Handle source) { + Label return_rax; // Finalize code - write the entry point code now we know how many // registers we need. // Entry code: @@ -784,7 +791,7 @@ Handle RegExpMacroAssemblerX64::GetCode(Handle source) { ASSERT_EQ(kInputStart, -3 * kPointerSize); ASSERT_EQ(kInputEnd, -4 * kPointerSize); ASSERT_EQ(kRegisterOutput, -5 * kPointerSize); - ASSERT_EQ(kStackHighEnd, -6 * kPointerSize); + ASSERT_EQ(kNumOutputRegisters, -6 * kPointerSize); __ push(rdi); __ push(rsi); __ push(rdx); @@ -795,7 +802,8 @@ Handle RegExpMacroAssemblerX64::GetCode(Handle source) { __ push(rbx); // Callee-save #endif - __ push(Immediate(0)); // Make room for "at start" constant. + __ push(Immediate(0)); // Number of successful matches in a global regexp. + __ push(Immediate(0)); // Make room for "input start - 1" constant. // Check if we have space on the stack for registers. Label stack_limit_hit; @@ -815,14 +823,14 @@ Handle RegExpMacroAssemblerX64::GetCode(Handle source) { // Exit with OutOfMemory exception. There is not enough space on the stack // for our working registers. __ Set(rax, EXCEPTION); - __ jmp(&exit_label_); + __ jmp(&return_rax); __ bind(&stack_limit_hit); __ Move(code_object_pointer(), masm_.CodeObject()); CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp. __ testq(rax, rax); // If returned value is non-zero, we exit with the returned value as result. - __ j(not_zero, &exit_label_); + __ j(not_zero, &return_rax); __ bind(&stack_ok); @@ -847,19 +855,7 @@ Handle RegExpMacroAssemblerX64::GetCode(Handle source) { // position registers. __ movq(Operand(rbp, kInputStartMinusOne), rax); - if (num_saved_registers_ > 0) { - // Fill saved registers with initial value = start offset - 1 - // Fill in stack push order, to avoid accessing across an unwritten - // page (a problem on Windows). - __ Set(rcx, kRegisterZero); - Label init_loop; - __ bind(&init_loop); - __ movq(Operand(rbp, rcx, times_1, 0), rax); - __ subq(rcx, Immediate(kPointerSize)); - __ cmpq(rcx, - Immediate(kRegisterZero - num_saved_registers_ * kPointerSize)); - __ j(greater, &init_loop); - } +#ifdef WIN32 // Ensure that we have written to each stack page, in order. Skipping a page // on Windows can cause segmentation faults. Assuming page size is 4k. const int kPageSize = 4096; @@ -869,21 +865,49 @@ Handle RegExpMacroAssemblerX64::GetCode(Handle source) { i += kRegistersPerPage) { __ movq(register_location(i), rax); // One write every page. } +#endif // WIN32 - // Initialize backtrack stack pointer. 
- __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd)); // Initialize code object pointer. __ Move(code_object_pointer(), masm_.CodeObject()); - // Load previous char as initial value of current-character. - Label at_start; - __ cmpb(Operand(rbp, kStartIndex), Immediate(0)); - __ j(equal, &at_start); - LoadCurrentCharacterUnchecked(-1, 1); // Load previous char. - __ jmp(&start_label_); - __ bind(&at_start); + + Label load_char_start_regexp, start_regexp; + // Load newline if index is at start, previous character otherwise. + __ cmpl(Operand(rbp, kStartIndex), Immediate(0)); + __ j(not_equal, &load_char_start_regexp, Label::kNear); __ Set(current_character(), '\n'); - __ jmp(&start_label_); + __ jmp(&start_regexp, Label::kNear); + + // Global regexp restarts matching here. + __ bind(&load_char_start_regexp); + // Load previous char as initial value of current character register. + LoadCurrentCharacterUnchecked(-1, 1); + __ bind(&start_regexp); + + // Initialize on-stack registers. + if (num_saved_registers_ > 0) { + // Fill saved registers with initial value = start offset - 1 + // Fill in stack push order, to avoid accessing across an unwritten + // page (a problem on Windows). + if (num_saved_registers_ > 8) { + __ Set(rcx, kRegisterZero); + Label init_loop; + __ bind(&init_loop); + __ movq(Operand(rbp, rcx, times_1, 0), rax); + __ subq(rcx, Immediate(kPointerSize)); + __ cmpq(rcx, + Immediate(kRegisterZero - num_saved_registers_ * kPointerSize)); + __ j(greater, &init_loop); + } else { // Unroll the loop. + for (int i = 0; i < num_saved_registers_; i++) { + __ movq(register_location(i), rax); + } + } + } + + // Initialize backtrack stack pointer. + __ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd)); + __ jmp(&start_label_); // Exit code: if (success_label_.is_linked()) { @@ -902,6 +926,10 @@ Handle RegExpMacroAssemblerX64::GetCode(Handle source) { } for (int i = 0; i < num_saved_registers_; i++) { __ movq(rax, register_location(i)); + if (i == 0 && global()) { + // Keep capture start in rdx for the zero-length check later. + __ movq(rdx, rax); + } __ addq(rax, rcx); // Convert to index from start, not end. if (mode_ == UC16) { __ sar(rax, Immediate(1)); // Convert byte index to character index. @@ -909,12 +937,54 @@ Handle RegExpMacroAssemblerX64::GetCode(Handle source) { __ movl(Operand(rbx, i * kIntSize), rax); } } - __ Set(rax, SUCCESS); + + if (global()) { + // Restart matching if the regular expression is flagged as global. + // Increment success counter. + __ incq(Operand(rbp, kSuccessfulCaptures)); + // Capture results have been stored, so the number of remaining global + // output registers is reduced by the number of stored captures. + __ movsxlq(rcx, Operand(rbp, kNumOutputRegisters)); + __ subq(rcx, Immediate(num_saved_registers_)); + // Check whether we have enough room for another set of capture results. + __ cmpq(rcx, Immediate(num_saved_registers_)); + __ j(less, &exit_label_); + + __ movq(Operand(rbp, kNumOutputRegisters), rcx); + // Advance the location for output. + __ addq(Operand(rbp, kRegisterOutput), + Immediate(num_saved_registers_ * kIntSize)); + + // Prepare rax to initialize registers with its value in the next run. + __ movq(rax, Operand(rbp, kInputStartMinusOne)); + + // Special case for zero-length matches. + // rdx: capture start index + __ cmpq(rdi, rdx); + // Not a zero-length match, restart. + __ j(not_equal, &load_char_start_regexp); + // rdi (offset from the end) is zero if we already reached the end. 
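The zero-length check that follows is the standard guard that keeps a global regexp from restarting forever on an empty match: if the capture start equals the current position, matching only continues after advancing one character (two bytes in UC16), and stops if the end of input was already reached. The same policy in a standalone toy matcher (our own code, not V8's):

#include <cstdio>
#include <cstring>

// Toy matcher: consumes a run of zero or more 'a' characters starting at
// pos. Always succeeds; the match may be empty.
static bool MatchRunOfA(const char* s, int pos, int* end) {
  int e = pos;
  while (s[e] == 'a') e++;
  *end = e;
  return true;
}

int main() {
  const char* input = "baa";
  const int length = static_cast<int>(strlen(input));
  int pos = 0, matches = 0;
  while (pos <= length) {
    int end;
    if (!MatchRunOfA(input, pos, &end)) break;
    matches++;
    if (end == pos) {            // zero-length match
      if (pos == length) break;  // already at end of input: stop
      pos++;                     // advance one character and retry
    } else {
      pos = end;                 // non-empty match: restart at match end
    }
  }
  printf("%d matches\n", matches);  // like /a*/g on "baa": 3 matches
  return 0;
}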
+ __ testq(rdi, rdi); + __ j(zero, &exit_label_, Label::kNear); + // Advance current position after a zero-length match. + if (mode_ == UC16) { + __ addq(rdi, Immediate(2)); + } else { + __ incq(rdi); + } + __ jmp(&load_char_start_regexp); + } else { + __ movq(rax, Immediate(SUCCESS)); + } } - // Exit and return rax __ bind(&exit_label_); + if (global()) { + // Return the number of successful captures. + __ movq(rax, Operand(rbp, kSuccessfulCaptures)); + } + __ bind(&return_rax); #ifdef _WIN64 // Restore callee save registers. __ lea(rsp, Operand(rbp, kLastCalleeSaveRegister)); @@ -951,7 +1021,7 @@ Handle RegExpMacroAssemblerX64::GetCode(Handle source) { __ testq(rax, rax); // If returning non-zero, we should end execution with the given // result as return value. - __ j(not_zero, &exit_label_); + __ j(not_zero, &return_rax); // Restore registers. __ Move(code_object_pointer(), masm_.CodeObject()); @@ -1012,7 +1082,7 @@ Handle RegExpMacroAssemblerX64::GetCode(Handle source) { __ bind(&exit_with_exception); // Exit with Result EXCEPTION(-1) to signal thrown exception. __ Set(rax, EXCEPTION); - __ jmp(&exit_label_); + __ jmp(&return_rax); } FixupCodeRelativePositions(); @@ -1135,8 +1205,9 @@ void RegExpMacroAssemblerX64::SetRegister(int register_index, int to) { } -void RegExpMacroAssemblerX64::Succeed() { +bool RegExpMacroAssemblerX64::Succeed() { __ jmp(&success_label_); + return global(); } diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.h b/deps/v8/src/x64/regexp-macro-assembler-x64.h index cd24b60..31fc8ef 100644 --- a/deps/v8/src/x64/regexp-macro-assembler-x64.h +++ b/deps/v8/src/x64/regexp-macro-assembler-x64.h @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -109,7 +109,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler { virtual void ReadStackPointerFromRegister(int reg); virtual void SetCurrentPositionFromEnd(int by); virtual void SetRegister(int register_index, int to); - virtual void Succeed(); + virtual bool Succeed(); virtual void WriteCurrentPositionToRegister(int reg, int cp_offset); virtual void ClearRegisters(int reg_from, int reg_to); virtual void WriteStackPointerToRegister(int reg); @@ -154,7 +154,12 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler { static const int kInputStart = kStartIndex + kPointerSize; static const int kInputEnd = kInputStart + kPointerSize; static const int kRegisterOutput = kInputEnd + kPointerSize; - static const int kStackHighEnd = kRegisterOutput + kPointerSize; + // For the case of global regular expression, we have room to store at least + // one set of capture results. For the case of non-global regexp, we ignore + // this value. NumOutputRegisters is passed as 32-bit value. The upper + // 32 bit of this 64-bit stack slot may contain garbage. + static const int kNumOutputRegisters = kRegisterOutput + kPointerSize; + static const int kStackHighEnd = kNumOutputRegisters + kPointerSize; // DirectCall is passed as 32 bit int (values 0 or 1). 
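The frame-slot constants here are chained, each offset derived from the previous one, so inserting kNumOutputRegisters between kRegisterOutput and kStackHighEnd shifts every slot above it by one pointer. A small compile-time self-check of that chaining, using our own illustrative base value rather than the real frame layout:

#include <cstdint>

static const int kPointerSize = 8;
static const int kRegisterOutput = 5 * kPointerSize;  // illustrative base
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;

// One new slot now sits between the output pointer and the stack high end.
static_assert(kStackHighEnd - kRegisterOutput == 2 * kPointerSize,
              "kNumOutputRegisters occupies exactly one pointer-sized slot");

int main() { return 0; }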
static const int kDirectCall = kStackHighEnd + kPointerSize; static const int kIsolate = kDirectCall + kPointerSize; @@ -167,8 +172,12 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler { static const int kInputStart = kStartIndex - kPointerSize; static const int kInputEnd = kInputStart - kPointerSize; static const int kRegisterOutput = kInputEnd - kPointerSize; - static const int kStackHighEnd = kRegisterOutput - kPointerSize; - static const int kDirectCall = kFrameAlign; + // For the case of global regular expression, we have room to store at least + // one set of capture results. For the case of non-global regexp, we ignore + // this value. + static const int kNumOutputRegisters = kRegisterOutput - kPointerSize; + static const int kStackHighEnd = kFrameAlign; + static const int kDirectCall = kStackHighEnd + kPointerSize; static const int kIsolate = kDirectCall + kPointerSize; #endif @@ -183,14 +192,14 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler { // AMD64 Calling Convention has only one callee-save register that // we use. We push this after the frame pointer (and after the // parameters). - static const int kBackup_rbx = kStackHighEnd - kPointerSize; + static const int kBackup_rbx = kNumOutputRegisters - kPointerSize; static const int kLastCalleeSaveRegister = kBackup_rbx; #endif + static const int kSuccessfulCaptures = kLastCalleeSaveRegister - kPointerSize; // When adding local variables remember to push space for them in // the frame in GetCode. - static const int kInputStartMinusOne = - kLastCalleeSaveRegister - kPointerSize; + static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize; // First register address. Following registers are below it on the stack. static const int kRegisterZero = kInputStartMinusOne - kPointerSize; diff --git a/deps/v8/src/x64/simulator-x64.h b/deps/v8/src/x64/simulator-x64.h index df8423a..8aba701 100644 --- a/deps/v8/src/x64/simulator-x64.h +++ b/deps/v8/src/x64/simulator-x64.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -40,12 +40,12 @@ namespace internal { (entry(p0, p1, p2, p3, p4)) typedef int (*regexp_matcher)(String*, int, const byte*, - const byte*, int*, Address, int, Isolate*); + const byte*, int*, int, Address, int, Isolate*); // Call the generated regexp code directly. The code at the entry address should // expect eight int/pointer sized arguments and return an int. -#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \ - (FUNCTION_CAST(entry)(p0, p1, p2, p3, p4, p5, p6, p7)) +#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \ + (FUNCTION_CAST(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8)) #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ (reinterpret_cast(try_catch_address)) diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc index 5721e9b..1b8ed38 100644 --- a/deps/v8/src/x64/stub-cache-x64.cc +++ b/deps/v8/src/x64/stub-cache-x64.cc @@ -1434,17 +1434,32 @@ Handle CallStubCompiler::CompileArrayPushCall( __ jmp(&fast_object); // In case of fast smi-only, convert to fast object, otherwise bail out. 
__ bind(¬_fast_object); - __ CheckFastSmiOnlyElements(rbx, &call_builtin); + __ CheckFastSmiElements(rbx, &call_builtin); // rdx: receiver // rbx: map - __ movq(r9, rdi); // Backup rdi as it is going to be trashed. - __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS, + + Label try_holey_map; + __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, rbx, rdi, + &try_holey_map); + + ElementsTransitionGenerator:: + GenerateMapChangeElementsTransition(masm()); + // Restore edi. + __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset)); + __ jmp(&fast_object); + + __ bind(&try_holey_map); + __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS, + FAST_HOLEY_ELEMENTS, + rbx, + rdi, &call_builtin); - ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm()); - __ movq(rdi, r9); + ElementsTransitionGenerator:: + GenerateMapChangeElementsTransition(masm()); + __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset)); __ bind(&fast_object); } else { __ CheckFastObjectElements(rbx, &call_builtin); @@ -3369,8 +3384,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( __ movsd(Operand(rbx, rdi, times_8, 0), xmm0); break; case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3435,8 +3453,11 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( case EXTERNAL_FLOAT_ELEMENTS: case EXTERNAL_DOUBLE_ELEMENTS: case FAST_ELEMENTS: - case FAST_SMI_ONLY_ELEMENTS: + case FAST_SMI_ELEMENTS: case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: UNREACHABLE(); @@ -3587,7 +3608,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // Check that the key is a smi or a heap number convertible to a smi. GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic); - if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + if (IsFastSmiElementsKind(elements_kind)) { __ JumpIfNotSmi(rax, &transition_elements_kind); } @@ -3611,13 +3632,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ j(not_equal, &miss_force_generic); __ bind(&finish_store); - if (elements_kind == FAST_SMI_ONLY_ELEMENTS) { + if (IsFastSmiElementsKind(elements_kind)) { __ SmiToInteger32(rcx, rcx); __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize), rax); } else { // Do the store and update the write barrier. - ASSERT(elements_kind == FAST_ELEMENTS); + ASSERT(IsFastObjectElementsKind(elements_kind)); __ SmiToInteger32(rcx, rcx); __ lea(rcx, FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize)); diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status index af28be1..3cbc3bc 100644 --- a/deps/v8/test/cctest/cctest.status +++ b/deps/v8/test/cctest/cctest.status @@ -27,6 +27,7 @@ prefix cctest +# All tests prefixed with 'Bug' are expected to fail. test-api/Bug*: FAIL ############################################################################## diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc index 8f405b7..762cc9f 100644 --- a/deps/v8/test/cctest/test-func-name-inference.cc +++ b/deps/v8/test/cctest/test-func-name-inference.cc @@ -400,3 +400,41 @@ TEST(AssignmentAndCall) { // See MultipleAssignments test. 
CheckFunctionName(script, "return 2", "Enclosing.Bar"); } + + +TEST(MethodAssignmentInAnonymousFunctionCall) { + InitializeVM(); + v8::HandleScope scope; + + v8::Handle script = Compile( + "(function () {\n" + " var EventSource = function () { };\n" + " EventSource.prototype.addListener = function () {\n" + " return 2012;\n" + " };\n" + " this.PublicEventSource = EventSource;\n" + "})();"); + CheckFunctionName(script, "return 2012", "EventSource.addListener"); +} + + +TEST(ReturnAnonymousFunction) { + InitializeVM(); + v8::HandleScope scope; + + v8::Handle script = Compile( + "(function() {\n" + " function wrapCode() {\n" + " return function () {\n" + " return 2012;\n" + " };\n" + " };\n" + " var foo = 10;\n" + " function f() {\n" + " return wrapCode();\n" + " }\n" + " this.ref = f;\n" + "})()"); + script->Run(); + CheckFunctionName(script, "return 2012", ""); +} diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc index cbe8d44..c405b33 100644 --- a/deps/v8/test/cctest/test-heap-profiler.cc +++ b/deps/v8/test/cctest/test-heap-profiler.cc @@ -7,6 +7,7 @@ #include "v8.h" #include "cctest.h" +#include "hashmap.h" #include "heap-profiler.h" #include "snapshot.h" #include "debug.h" @@ -27,10 +28,14 @@ class NamedEntriesDetector { if (strcmp(entry->name(), "C2") == 0) has_C2 = true; } + static bool AddressesMatch(void* key1, void* key2) { + return key1 == key2; + } + void CheckAllReachables(i::HeapEntry* root) { + i::HashMap visited(AddressesMatch); i::List list(10); list.Add(root); - root->paint(); CheckEntry(root); while (!list.is_empty()) { i::HeapEntry* entry = list.RemoveLast(); @@ -38,11 +43,15 @@ class NamedEntriesDetector { for (int i = 0; i < children.length(); ++i) { if (children[i]->type() == i::HeapGraphEdge::kShortcut) continue; i::HeapEntry* child = children[i]->to(); - if (!child->painted()) { - list.Add(child); - child->paint(); - CheckEntry(child); - } + i::HashMap::Entry* entry = visited.Lookup( + reinterpret_cast(child), + static_cast(reinterpret_cast(child)), + true); + if (entry->value) + continue; + entry->value = reinterpret_cast(1); + list.Add(child); + CheckEntry(child); } } } @@ -105,9 +114,6 @@ TEST(HeapSnapshot) { "var c2 = new C2(a2);"); const v8::HeapSnapshot* snapshot_env2 = v8::HeapProfiler::TakeSnapshot(v8_str("env2")); - i::HeapSnapshot* i_snapshot_env2 = - const_cast( - reinterpret_cast(snapshot_env2)); const v8::HeapGraphNode* global_env2 = GetGlobalObject(snapshot_env2); // Verify, that JS global object of env2 has '..2' properties. @@ -120,9 +126,7 @@ TEST(HeapSnapshot) { NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "b2_2")); CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "c2")); - // Paint all nodes reachable from global object. NamedEntriesDetector det; - i_snapshot_env2->ClearPaint(); det.CheckAllReachables(const_cast( reinterpret_cast(global_env2))); CHECK(det.has_A2); @@ -156,9 +160,9 @@ TEST(HeapSnapshotObjectSizes) { CHECK_NE(NULL, x2); // Test sizes. 
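CheckAllReachables above now marks visited nodes in an external i::HashMap keyed by entry address instead of painting a flag on each node, so the traversal no longer mutates the snapshot. The same pattern in portable C++, with std containers standing in for i::HashMap and i::List:

#include <cstdio>
#include <unordered_set>
#include <vector>

struct Node {
  const char* name;
  std::vector<Node*> children;
};

// Visit every node reachable from root exactly once, without touching any
// state stored on the nodes themselves.
static void CheckAllReachables(Node* root) {
  std::unordered_set<Node*> visited;  // external replacement for "painting"
  std::vector<Node*> list;
  visited.insert(root);
  list.push_back(root);
  while (!list.empty()) {
    Node* entry = list.back();
    list.pop_back();
    printf("visiting %s\n", entry->name);
    for (Node* child : entry->children) {
      if (!visited.insert(child).second) continue;  // already seen
      list.push_back(child);
    }
  }
}

int main() {
  Node c = {"C", {}};
  Node b = {"B", {&c}};
  Node a = {"A", {&b, &c}};
  c.children.push_back(&a);  // cycle back to A; the set prevents looping
  CheckAllReachables(&a);
  return 0;
}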
- CHECK_EQ(x->GetSelfSize() * 3, x->GetRetainedSize()); - CHECK_EQ(x1->GetSelfSize(), x1->GetRetainedSize()); - CHECK_EQ(x2->GetSelfSize(), x2->GetRetainedSize()); + CHECK_NE(0, x->GetSelfSize()); + CHECK_NE(0, x1->GetSelfSize()); + CHECK_NE(0, x2->GetSelfSize()); } @@ -477,66 +481,6 @@ TEST(HeapSnapshotRootPreservedAfterSorting) { } -TEST(HeapEntryDominator) { - // The graph looks like this: - // - // -> node1 - // a |^ - // -> node5 ba - // a v| - // node6 -> node2 - // b a |^ - // -> node4 ba - // b v| - // -> node3 - // - // The dominator for all nodes is node6. - - v8::HandleScope scope; - LocalContext env; - - CompileRun( - "function X(a, b) { this.a = a; this.b = b; }\n" - "node6 = new X(new X(new X()), new X(new X(),new X()));\n" - "(function(){\n" - "node6.a.a.b = node6.b.a; // node1 -> node2\n" - "node6.b.a.a = node6.a.a; // node2 -> node1\n" - "node6.b.a.b = node6.b.b; // node2 -> node3\n" - "node6.b.b.a = node6.b.a; // node3 -> node2\n" - "})();"); - - const v8::HeapSnapshot* snapshot = - v8::HeapProfiler::TakeSnapshot(v8_str("dominators")); - - const v8::HeapGraphNode* global = GetGlobalObject(snapshot); - CHECK_NE(NULL, global); - const v8::HeapGraphNode* node6 = - GetProperty(global, v8::HeapGraphEdge::kProperty, "node6"); - CHECK_NE(NULL, node6); - const v8::HeapGraphNode* node5 = - GetProperty(node6, v8::HeapGraphEdge::kProperty, "a"); - CHECK_NE(NULL, node5); - const v8::HeapGraphNode* node4 = - GetProperty(node6, v8::HeapGraphEdge::kProperty, "b"); - CHECK_NE(NULL, node4); - const v8::HeapGraphNode* node3 = - GetProperty(node4, v8::HeapGraphEdge::kProperty, "b"); - CHECK_NE(NULL, node3); - const v8::HeapGraphNode* node2 = - GetProperty(node4, v8::HeapGraphEdge::kProperty, "a"); - CHECK_NE(NULL, node2); - const v8::HeapGraphNode* node1 = - GetProperty(node5, v8::HeapGraphEdge::kProperty, "a"); - CHECK_NE(NULL, node1); - - CHECK_EQ(node6, node1->GetDominatorNode()); - CHECK_EQ(node6, node2->GetDominatorNode()); - CHECK_EQ(node6, node3->GetDominatorNode()); - CHECK_EQ(node6, node4->GetDominatorNode()); - CHECK_EQ(node6, node5->GetDominatorNode()); -} - - namespace { class TestJSONStream : public v8::OutputStream { diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc index 72079dc..33aaed3 100644 --- a/deps/v8/test/cctest/test-heap.cc +++ b/deps/v8/test/cctest/test-heap.cc @@ -673,7 +673,7 @@ TEST(JSArray) { array->SetElementsLength(Smi::FromInt(0))->ToObjectChecked(); CHECK_EQ(Smi::FromInt(0), array->length()); // Must be in fast mode. - CHECK(array->HasFastTypeElements()); + CHECK(array->HasFastSmiOrObjectElements()); // array[length] = name. 
array->SetElement(0, *name, NONE, kNonStrictMode)->ToObjectChecked(); @@ -811,7 +811,9 @@ TEST(Iteration) { // Allocate a JS array to OLD_POINTER_SPACE and NEW_SPACE objs[next_objs_index++] = FACTORY->NewJSArray(10); - objs[next_objs_index++] = FACTORY->NewJSArray(10, FAST_ELEMENTS, TENURED); + objs[next_objs_index++] = FACTORY->NewJSArray(10, + FAST_HOLEY_ELEMENTS, + TENURED); // Allocate a small string to OLD_DATA_SPACE and NEW_SPACE objs[next_objs_index++] = @@ -1595,7 +1597,7 @@ TEST(PrototypeTransitionClearing) { Handle prototype; PagedSpace* space = HEAP->old_pointer_space(); do { - prototype = FACTORY->NewJSArray(32 * KB, FAST_ELEMENTS, TENURED); + prototype = FACTORY->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS, TENURED); } while (space->FirstPage() == space->LastPage() || !space->LastPage()->Contains(prototype->address())); @@ -1735,3 +1737,60 @@ TEST(OptimizedAllocationAlwaysInNewSpace) { CHECK(HEAP->InNewSpace(*o)); } + + +static int CountMapTransitions(Map* map) { + int result = 0; + DescriptorArray* descs = map->instance_descriptors(); + for (int i = 0; i < descs->number_of_descriptors(); i++) { + if (descs->IsTransitionOnly(i)) { + result++; + } + } + return result; +} + + +// Test that map transitions are cleared and maps are collected with +// incremental marking as well. +TEST(Regress1465) { + i::FLAG_allow_natives_syntax = true; + i::FLAG_trace_incremental_marking = true; + InitializeVM(); + v8::HandleScope scope; + + #define TRANSITION_COUNT 256 + for (int i = 0; i < TRANSITION_COUNT; i++) { + EmbeddedVector buffer; + OS::SNPrintF(buffer, "var o = new Object; o.prop%d = %d;", i, i); + CompileRun(buffer.start()); + } + CompileRun("var root = new Object;"); + Handle root = + v8::Utils::OpenHandle( + *v8::Handle::Cast( + v8::Context::GetCurrent()->Global()->Get(v8_str("root")))); + + // Count number of live transitions before marking. + int transitions_before = CountMapTransitions(root->map()); + CompileRun("%DebugPrint(root);"); + CHECK_EQ(TRANSITION_COUNT, transitions_before); + + // Go through all incremental marking steps in one swoop. + IncrementalMarking* marking = HEAP->incremental_marking(); + CHECK(marking->IsStopped()); + marking->Start(); + CHECK(marking->IsMarking()); + while (!marking->IsComplete()) { + marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD); + } + CHECK(marking->IsComplete()); + HEAP->CollectAllGarbage(Heap::kNoGCFlags); + CHECK(marking->IsStopped()); + + // Count number of live transitions after marking. Note that one transition + // is left, because 'o' still holds an instance of one transition target. + int transitions_after = CountMapTransitions(root->map()); + CompileRun("%DebugPrint(root);"); + CHECK_EQ(1, transitions_after); +} diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc index 700f322..2712370 100644 --- a/deps/v8/test/cctest/test-mark-compact.cc +++ b/deps/v8/test/cctest/test-mark-compact.cc @@ -531,18 +531,18 @@ TEST(BootUpMemoryUse) { // there we just skip the test. if (initial_memory >= 0) { InitializeVM(); - intptr_t booted_memory = MemoryInUse(); + intptr_t delta = MemoryInUse() - initial_memory; if (sizeof(initial_memory) == 8) { if (v8::internal::Snapshot::IsEnabled()) { - CHECK_LE(booted_memory - initial_memory, 3600 * 1024); // 3396. + CHECK_LE(delta, 3600 * 1024); // 3396. } else { - CHECK_LE(booted_memory - initial_memory, 3600 * 1024); // 3432. + CHECK_LE(delta, 4000 * 1024); // 3948. 
} } else { if (v8::internal::Snapshot::IsEnabled()) { - CHECK_LE(booted_memory - initial_memory, 2800 * 1024); // 2484. + CHECK_LE(delta, 2600 * 1024); // 2484. } else { - CHECK_LE(booted_memory - initial_memory, 2950 * 1024); // 2844 + CHECK_LE(delta, 2950 * 1024); // 2844 } } } diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc index e89e6cd..9b4f905 100644 --- a/deps/v8/test/cctest/test-regexp.cc +++ b/deps/v8/test/cctest/test-regexp.cc @@ -1,4 +1,4 @@ -// Copyright 2008 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -506,8 +506,13 @@ static RegExpNode* Compile(const char* input, bool multiline, bool is_ascii) { NewStringFromUtf8(CStrVector(input)); Handle sample_subject = isolate->factory()->NewStringFromUtf8(CStrVector("")); - RegExpEngine::Compile( - &compile_data, false, multiline, pattern, sample_subject, is_ascii); + RegExpEngine::Compile(&compile_data, + false, + false, + multiline, + pattern, + sample_subject, + is_ascii); return compile_data.node; } @@ -720,6 +725,7 @@ static ArchRegExpMacroAssembler::Result Execute(Code* code, input_start, input_end, captures, + 0, Isolate::Current()); } @@ -998,11 +1004,11 @@ TEST(MacroAssemblerNativeBackReferenceUC16) { int output[4]; NativeRegExpMacroAssembler::Result result = Execute(*code, - *input, - 0, - start_adr, - start_adr + input->length() * 2, - output); + *input, + 0, + start_adr, + start_adr + input->length() * 2, + output); CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result); CHECK_EQ(0, output[0]); diff --git a/deps/v8/test/mjsunit/accessor-map-sharing.js b/deps/v8/test/mjsunit/accessor-map-sharing.js index ab45afa..8bbcb4f 100644 --- a/deps/v8/test/mjsunit/accessor-map-sharing.js +++ b/deps/v8/test/mjsunit/accessor-map-sharing.js @@ -25,7 +25,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// Flags: --allow-natives-syntax +// Flags: --allow-natives-syntax --fast-accessor-properties // Handy abbreviations. var dp = Object.defineProperty; diff --git a/deps/v8/test/mjsunit/array-construct-transition.js b/deps/v8/test/mjsunit/array-construct-transition.js index 577e321..f8d7c83 100644 --- a/deps/v8/test/mjsunit/array-construct-transition.js +++ b/deps/v8/test/mjsunit/array-construct-transition.js @@ -27,13 +27,13 @@ // Flags: --allow-natives-syntax --smi-only-arrays -support_smi_only_arrays = %HasFastSmiOnlyElements(new Array(1,2,3,4,5,6,7,8)); +support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6)); if (support_smi_only_arrays) { var a = new Array(0, 1, 2); - assertTrue(%HasFastSmiOnlyElements(a)); + assertTrue(%HasFastSmiElements(a)); var b = new Array(0.5, 1.2, 2.3); assertTrue(%HasFastDoubleElements(b)); var c = new Array(0.5, 1.2, new Object()); - assertTrue(%HasFastElements(c)); + assertTrue(%HasFastObjectElements(c)); } diff --git a/deps/v8/test/mjsunit/array-literal-transitions.js b/deps/v8/test/mjsunit/array-literal-transitions.js index f657525..a96719d 100644 --- a/deps/v8/test/mjsunit/array-literal-transitions.js +++ b/deps/v8/test/mjsunit/array-literal-transitions.js @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -33,7 +33,7 @@ // in this test case. Depending on whether smi-only arrays are actually // enabled, this test takes the appropriate code path to check smi-only arrays. -support_smi_only_arrays = %HasFastSmiOnlyElements([1,2,3,4,5,6,7,8,9,10]); +support_smi_only_arrays = %HasFastSmiElements([1,2,3,4,5,6,7,8,9,10]); if (support_smi_only_arrays) { print("Tests include smi-only arrays."); @@ -46,14 +46,14 @@ function get(foo) { return foo; } // Used to generate dynamic values. function array_literal_test() { var a0 = [1, 2, 3]; - assertTrue(%HasFastSmiOnlyElements(a0)); + assertTrue(%HasFastSmiElements(a0)); var a1 = [get(1), get(2), get(3)]; - assertTrue(%HasFastSmiOnlyElements(a1)); + assertTrue(%HasFastSmiElements(a1)); var b0 = [1, 2, get("three")]; - assertTrue(%HasFastElements(b0)); + assertTrue(%HasFastObjectElements(b0)); var b1 = [get(1), get(2), get("three")]; - assertTrue(%HasFastElements(b1)); + assertTrue(%HasFastObjectElements(b1)); var c0 = [1, 2, get(3.5)]; assertTrue(%HasFastDoubleElements(c0)); @@ -75,7 +75,7 @@ function array_literal_test() { var object = new Object(); var d0 = [1, 2, object]; - assertTrue(%HasFastElements(d0)); + assertTrue(%HasFastObjectElements(d0)); assertEquals(object, d0[2]); assertEquals(2, d0[1]); assertEquals(1, d0[0]); @@ -87,7 +87,7 @@ function array_literal_test() { assertEquals(1, e0[0]); var f0 = [1, 2, [1, 2]]; - assertTrue(%HasFastElements(f0)); + assertTrue(%HasFastObjectElements(f0)); assertEquals([1,2], f0[2]); assertEquals(2, f0[1]); assertEquals(1, f0[0]); @@ -115,9 +115,9 @@ if (support_smi_only_arrays) { large = [ 0, 1, 2, 3, 4, 5, d(), d(), d(), d(), d(), d(), o(), o(), o(), o() ]; assertFalse(%HasDictionaryElements(large)); - assertFalse(%HasFastSmiOnlyElements(large)); + assertFalse(%HasFastSmiElements(large)); assertFalse(%HasFastDoubleElements(large)); - assertTrue(%HasFastElements(large)); + assertTrue(%HasFastObjectElements(large)); assertEquals(large, [0, 1, 2, 3, 4, 5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, new Object(), new Object(), new Object(), new Object()]); diff --git a/deps/v8/test/mjsunit/elements-kind.js b/deps/v8/test/mjsunit/elements-kind.js index 4aa79de..26b3c78 100644 --- a/deps/v8/test/mjsunit/elements-kind.js +++ b/deps/v8/test/mjsunit/elements-kind.js @@ -34,7 +34,7 @@ // in this test case. Depending on whether smi-only arrays are actually // enabled, this test takes the appropriate code path to check smi-only arrays. -support_smi_only_arrays = %HasFastSmiOnlyElements(new Array(1,2,3,4,5,6,7,8)); +support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8)); if (support_smi_only_arrays) { print("Tests include smi-only arrays."); @@ -59,8 +59,8 @@ var elements_kind = { } function getKind(obj) { - if (%HasFastSmiOnlyElements(obj)) return elements_kind.fast_smi_only; - if (%HasFastElements(obj)) return elements_kind.fast; + if (%HasFastSmiElements(obj)) return elements_kind.fast_smi_only; + if (%HasFastObjectElements(obj)) return elements_kind.fast; if (%HasFastDoubleElements(obj)) return elements_kind.fast_double; if (%HasDictionaryElements(obj)) return elements_kind.dictionary; // Every external kind is also an external array. @@ -116,7 +116,7 @@ if (support_smi_only_arrays) { assertKind(elements_kind.fast_smi_only, too); } -// Make sure the element kind transitions from smionly when a non-smi is stored. 
+// Make sure the element kind transitions from smi when a non-smi is stored. var you = new Array(); assertKind(elements_kind.fast_smi_only, you); for (var i = 0; i < 1337; i++) { diff --git a/deps/v8/test/mjsunit/elements-transition-hoisting.js b/deps/v8/test/mjsunit/elements-transition-hoisting.js index 5e78f10..50ca2a1 100644 --- a/deps/v8/test/mjsunit/elements-transition-hoisting.js +++ b/deps/v8/test/mjsunit/elements-transition-hoisting.js @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -31,7 +31,7 @@ // not hoisted) correctly, don't change the semantics programs and don't trigger // deopt through hoisting in important situations. -support_smi_only_arrays = %HasFastSmiOnlyElements(new Array(1,2,3,4,5,6)); +support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6)); if (support_smi_only_arrays) { print("Tests include smi-only arrays."); diff --git a/deps/v8/test/mjsunit/elements-transition.js b/deps/v8/test/mjsunit/elements-transition.js index 60e051b..0dffd37 100644 --- a/deps/v8/test/mjsunit/elements-transition.js +++ b/deps/v8/test/mjsunit/elements-transition.js @@ -27,7 +27,7 @@ // Flags: --allow-natives-syntax --smi-only-arrays -support_smi_only_arrays = %HasFastSmiOnlyElements(new Array(1,2,3,4,5,6,7,8)); +support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8)); if (support_smi_only_arrays) { print("Tests include smi-only arrays."); @@ -44,8 +44,8 @@ if (support_smi_only_arrays) { var array_1 = new Array(length); var array_2 = new Array(length); - assertTrue(%HasFastSmiOnlyElements(array_1)); - assertTrue(%HasFastSmiOnlyElements(array_2)); + assertTrue(%HasFastSmiElements(array_1)); + assertTrue(%HasFastSmiElements(array_2)); for (var i = 0; i < length; i++) { if (i == length - 5 && test_double) { // Trigger conversion to fast double elements at length-5. @@ -57,8 +57,8 @@ if (support_smi_only_arrays) { // Trigger conversion to fast object elements at length-3. set(array_1, i, 'object'); set(array_2, i, 'object'); - assertTrue(%HasFastElements(array_1)); - assertTrue(%HasFastElements(array_2)); + assertTrue(%HasFastObjectElements(array_1)); + assertTrue(%HasFastObjectElements(array_2)); } else if (i != length - 7) { // Set the element to an integer but leave a hole at length-7. set(array_1, i, 2*i+1); diff --git a/deps/v8/test/mjsunit/packed-elements.js b/deps/v8/test/mjsunit/packed-elements.js new file mode 100644 index 0000000..7f333e5 --- /dev/null +++ b/deps/v8/test/mjsunit/packed-elements.js @@ -0,0 +1,112 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
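Aside: elements-transition.js above drives two identically constructed arrays through the same stores, relying on transitions forming a tree so that equal histories land on equal maps. A sketch of that invariant, under the same d8/--allow-natives-syntax assumptions as before (%HaveSameMap is a native the suite itself uses, e.g. in regress-1878.js):

  var a = new Array(10);
  var b = new Array(10);
  print(%HaveSameMap(a, b));  // true: same construction, same initial map
  a[0] = 1.5;                 // a alone transitions to double elements
  print(%HaveSameMap(a, b));  // false
  b[0] = 2.5;                 // b takes the identical transition
  print(%HaveSameMap(a, b));  // true again: both reached the same map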
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Flags: --allow-natives-syntax --smi-only-arrays --packed-arrays + +var has_packed_elements = !%HasFastHoleyElements(Array()); + +function test1() { + var a = Array(8); + assertTrue(%HasFastSmiOrObjectElements(a)); + assertTrue(%HasFastHoleyElements(a)); +} + +function test2() { + var a = Array(); + assertTrue(%HasFastSmiOrObjectElements(a)); + assertFalse(%HasFastHoleyElements(a)); +} + +function test3() { + var a = Array(1,2,3,4,5,6,7); + assertTrue(%HasFastSmiOrObjectElements(a)); + assertFalse(%HasFastHoleyElements(a)); +} + +function test4() { + var a = [1, 2, 3, 4]; + assertTrue(%HasFastSmiElements(a)); + assertFalse(%HasFastHoleyElements(a)); + var b = [1, 2,, 4]; + assertTrue(%HasFastSmiElements(b)); + assertTrue(%HasFastHoleyElements(b)); +} + +function test5() { + var a = [1, 2, 3, 4.5]; + assertTrue(%HasFastDoubleElements(a)); + assertFalse(%HasFastHoleyElements(a)); + var b = [1,, 3.5, 4]; + assertTrue(%HasFastDoubleElements(b)); + assertTrue(%HasFastHoleyElements(b)); + var c = [1, 3.5,, 4]; + assertTrue(%HasFastDoubleElements(c)); + assertTrue(%HasFastHoleyElements(c)); +} + +function test6() { + var x = new Object(); + var a = [1, 2, 3.5, x]; + assertTrue(%HasFastObjectElements(a)); + assertFalse(%HasFastHoleyElements(a)); + assertEquals(1, a[0]); + assertEquals(2, a[1]); + assertEquals(3.5, a[2]); + assertEquals(x, a[3]); + var b = [1,, 3.5, x]; + assertTrue(%HasFastObjectElements(b)); + assertTrue(%HasFastHoleyElements(b)); + assertEquals(1, b[0]); + assertEquals(undefined, b[1]); + assertEquals(3.5, b[2]); + assertEquals(x, b[3]); + var c = [1, 3.5, x,,]; + assertTrue(%HasFastObjectElements(c)); + assertTrue(%HasFastHoleyElements(c)); + assertEquals(1, c[0]); + assertEquals(3.5, c[1]); + assertEquals(x, c[2]); + assertEquals(undefined, c[3]); +} + +function test_with_optimization(f) { + // Run tests in a loop to make sure that inlined Array() constructor runs out + // of new space memory and must fall back on runtime impl. + for (i = 0; i < 250000; ++i) f(); + %OptimizeFunctionOnNextCall(f); + for (i = 0; i < 250000; ++i) f(); // Make sure GC happens +} + +if (has_packed_elements) { + test_with_optimization(test1); + test_with_optimization(test2); + test_with_optimization(test3); + test_with_optimization(test4); + test_with_optimization(test5); + test_with_optimization(test6); +} + diff --git a/deps/v8/test/mjsunit/regexp-global.js b/deps/v8/test/mjsunit/regexp-global.js new file mode 100644 index 0000000..12f8578 --- /dev/null +++ b/deps/v8/test/mjsunit/regexp-global.js @@ -0,0 +1,132 @@ +// Copyright 2012 the V8 project authors. All rights reserved. 
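Aside: the new packed-elements.js above separates the packed/holey axis from the smi/double/object axis. One case it does not spell out is that deleting an element also sets the holey bit, and that the bit is sticky; a sketch under the same flag assumptions (--allow-natives-syntax --smi-only-arrays --packed-arrays):

  var a = [1, 2, 3];
  print(%HasFastHoleyElements(a));  // false: literals start packed
  delete a[1];                      // punching a hole marks the kind holey
  print(%HasFastHoleyElements(a));  // true
  a[1] = 2;                         // refilling does not un-mark it:
  print(%HasFastHoleyElements(a));  // still true -- holey never goes back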
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +// Test that an optional capture is cleared between two matches. +var str = "ABX X"; +str = str.replace(/(\w)?X/g, function(match, capture) { + assertTrue(match.indexOf(capture) >= 0 || + capture === undefined); + return capture ? capture.toLowerCase() : "-"; + }); +assertEquals("Ab -", str); + +// Test zero-length matches. +str = "Als Gregor Samsa eines Morgens"; +str = str.replace(/\b/g, function(match, capture) { + return "/"; + }); +assertEquals("/Als/ /Gregor/ /Samsa/ /eines/ /Morgens/", str); + +// Test zero-length matches that have non-zero-length sub-captures. +str = "It was a pleasure to burn."; +str = str.replace(/(?=(\w+))\b/g, function(match, capture) { + return capture.length; + }); +assertEquals("2It 3was 1a 8pleasure 2to 4burn.", str); + +// Test multiple captures. +str = "Try not. Do, or do not. There is no try."; +str = str.replace(/(not?)|(do)|(try)/gi, + function(match, c1, c2, c3) { + assertTrue((c1 === undefined && c2 === undefined) || + (c2 === undefined && c3 === undefined) || + (c1 === undefined && c3 === undefined)); + if (c1) return "-"; + if (c2) return "+"; + if (c3) return "=" + }); +assertEquals("= -. +, or + -. There is - =.", str); + +// Test multiple alternate captures. +str = "FOUR LEGS GOOD, TWO LEGS BAD!"; +str = str.replace(/(FOUR|TWO) LEGS (GOOD|BAD)/g, + function(match, num_legs, likeability) { + assertTrue(num_legs !== undefined); + assertTrue(likeability !== undefined); + if (num_legs == "FOUR") assertTrue(likeability == "GOOD"); + if (num_legs == "TWO") assertTrue(likeability == "BAD"); + return match.length - 10; + }); +assertEquals("4, 2!", str); + + +// The same tests with UC16. + +//Test that an optional capture is cleared between two matches. +str = "AB\u1234 \u1234"; +str = str.replace(/(\w)?\u1234/g, + function(match, capture) { + assertTrue(match.indexOf(capture) >= 0 || + capture === undefined); + return capture ? capture.toLowerCase() : "-"; + }); +assertEquals("Ab -", str); + +// Test zero-length matches. 
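Aside: the core convention these regexp-global tests pin down is how String.prototype.replace drives a function replacement under /g: one call per match, with optional groups that did not participate passed as undefined rather than carrying over the previous match's value. A plain-JS recap (print assumed from d8; console.log works elsewhere):

  "ABX X".replace(/(\w)?X/g, function(match, capture) {
    // First call: match "BX", capture "B".
    // Second call: match "X", capture undefined -- the engine must
    // clear the optional group between matches, not reuse "B".
    print(match + " -> " + capture);
    return "-";
  });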
+str = "Als \u2623\u2642 eines Morgens"; +str = str.replace(/\b/g, function(match, capture) { + return "/"; + }); +assertEquals("/Als/ \u2623\u2642 /eines/ /Morgens/", str); + +// Test zero-length matches that have non-zero-length sub-captures. +str = "It was a pleasure to \u70e7."; +str = str.replace(/(?=(\w+))\b/g, function(match, capture) { + return capture.length; + }); +assertEquals("2It 3was 1a 8pleasure 2to \u70e7.", str); + +// Test multiple captures. +str = "Try not. D\u26aa, or d\u26aa not. There is no try."; +str = str.replace(/(not?)|(d\u26aa)|(try)/gi, + function(match, c1, c2, c3) { + assertTrue((c1 === undefined && c2 === undefined) || + (c2 === undefined && c3 === undefined) || + (c1 === undefined && c3 === undefined)); + if (c1) return "-"; + if (c2) return "+"; + if (c3) return "=" + }); +assertEquals("= -. +, or + -. There is - =.", str); + +// Test multiple alternate captures. +str = "FOUR \u817f GOOD, TWO \u817f BAD!"; +str = str.replace(/(FOUR|TWO) \u817f (GOOD|BAD)/g, + function(match, num_legs, likeability) { + assertTrue(num_legs !== undefined); + assertTrue(likeability !== undefined); + if (num_legs == "FOUR") assertTrue(likeability == "GOOD"); + if (num_legs == "TWO") assertTrue(likeability == "BAD"); + return match.length - 7; + }); +assertEquals("4, 2!", str); + +// Test capture that is a real substring. +var str = "Beasts of England, beasts of Ireland"; +str = str.replace(/(.*)/g, function(match) { return '~'; }); +assertEquals("~~", str); diff --git a/deps/v8/test/mjsunit/regexp.js b/deps/v8/test/mjsunit/regexp.js index ec82c96..c2d9282 100644 --- a/deps/v8/test/mjsunit/regexp.js +++ b/deps/v8/test/mjsunit/regexp.js @@ -705,3 +705,14 @@ assertThrows("RegExp('(?!*)')"); // Test trimmed regular expression for RegExp.test(). assertTrue(/.*abc/.test("abc")); assertFalse(/.*\d+/.test("q")); + +// Test that RegExp.prototype.toString() throws TypeError for +// incompatible receivers (ES5 section 15.10.6 and 15.10.6.4). +assertThrows("RegExp.prototype.toString.call(null)", TypeError); +assertThrows("RegExp.prototype.toString.call(0)", TypeError); +assertThrows("RegExp.prototype.toString.call('')", TypeError); +assertThrows("RegExp.prototype.toString.call(false)", TypeError); +assertThrows("RegExp.prototype.toString.call(true)", TypeError); +assertThrows("RegExp.prototype.toString.call([])", TypeError); +assertThrows("RegExp.prototype.toString.call({})", TypeError); +assertThrows("RegExp.prototype.toString.call(function(){})", TypeError); diff --git a/deps/v8/test/mjsunit/regress/regress-117409.js b/deps/v8/test/mjsunit/regress/regress-117409.js index 9222191..98aab5a 100644 --- a/deps/v8/test/mjsunit/regress/regress-117409.js +++ b/deps/v8/test/mjsunit/regress/regress-117409.js @@ -36,7 +36,7 @@ var literal = [1.2]; KeyedStoreIC(literal); KeyedStoreIC(literal); -// Trruncate array to 0 elements, at which point backing store will be replaced +// Truncate array to 0 elements, at which point backing store will be replaced // with empty fixed array. literal.length = 0; diff --git a/deps/v8/test/mjsunit/regress/regress-128018.js b/deps/v8/test/mjsunit/regress/regress-128018.js new file mode 100644 index 0000000..7bd1585 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-128018.js @@ -0,0 +1,35 @@ +// Copyright 2012 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Flags: --expose-gc + +function KeyedStoreIC(a) { a[(1)] = Math.E; } +var literal = [1.2]; +literal.length = 0; +literal.push('0' && 0 ); +KeyedStoreIC(literal); +gc(); diff --git a/deps/v8/test/mjsunit/regress/regress-128146.js b/deps/v8/test/mjsunit/regress/regress-128146.js new file mode 100644 index 0000000..730dd91 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-128146.js @@ -0,0 +1,33 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
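Aside: regress-128018 above is compact enough to annotate. The sequence shrinks a FAST_DOUBLE array so its backing store becomes the canonical empty fixed array, then re-runs a keyed store site that had already been learned; presumably the original bug was a stale assumption about that backing store. A commented restatement under the usual d8/--allow-natives-syntax assumptions:

  var literal = [1.2];
  print(%HasFastDoubleElements(literal));  // learned kind: unboxed doubles
  literal.length = 0;       // backing store replaced by the empty FixedArray
  literal.push('0' && 0);   // '0' && 0 evaluates to the smi 0
  // Kinds never narrow, so the array still reports double elements even
  // though it now holds only a smi-valued entry:
  print(%HasFastDoubleElements(literal));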
+ +// Define accessor properties, resulting in an AccessorPair with 2 transitions. +Object.defineProperty({},"foo",{set:function(){},configurable:false}); +Object.defineProperty({},"foo",{get:function(){},configurable:false}); + +// Define a data property under the same name. +Object.defineProperty({},"foo",{}); diff --git a/deps/v8/test/mjsunit/regress/regress-1849.js b/deps/v8/test/mjsunit/regress/regress-1849.js index 176f918..5b8fc50 100644 --- a/deps/v8/test/mjsunit/regress/regress-1849.js +++ b/deps/v8/test/mjsunit/regress/regress-1849.js @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -25,7 +25,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// See: http://code.google.com/p/v8/issues/detail?id=1878 +// See: http://code.google.com/p/v8/issues/detail?id=1849 // Flags: --allow-natives-syntax @@ -36,4 +36,4 @@ for (var i = 0; i < count; i++) { arr[i] = 0; } assertFalse(%HasFastDoubleElements(arr)); -assertTrue(%HasFastSmiOnlyElements(arr)); +assertTrue(%HasFastSmiElements(arr)); diff --git a/deps/v8/test/mjsunit/regress/regress-1878.js b/deps/v8/test/mjsunit/regress/regress-1878.js index a1648b1..fbc47bd 100644 --- a/deps/v8/test/mjsunit/regress/regress-1878.js +++ b/deps/v8/test/mjsunit/regress/regress-1878.js @@ -34,11 +34,11 @@ var a = Array(); for (var i = 0; i < 1000; i++) { var ai = natives.InternalArray(10000); assertFalse(%HaveSameMap(ai, a)); - assertTrue(%HasFastElements(ai)); + assertTrue(%HasFastObjectElements(ai)); } for (var i = 0; i < 1000; i++) { var ai = new natives.InternalArray(10000); assertFalse(%HaveSameMap(ai, a)); - assertTrue(%HasFastElements(ai)); + assertTrue(%HasFastObjectElements(ai)); } diff --git a/deps/v8/test/mjsunit/regress/regress-2071.js b/deps/v8/test/mjsunit/regress/regress-2071.js new file mode 100644 index 0000000..91ae2a7 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-2071.js @@ -0,0 +1,79 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +a = {}; + +a.b = 42; + +with(a) { + a.f = (function f1() { + function f2() { + return b; + }; + return f2; + })(); +} + +for(var i = 0; i < 10000; i++) { + assertEquals(42, a.f()); +} + +with(a) { + a.g = (function f1() { + function f2() { + function f3() { + return b; + } + return f3; + }; + return f2(); + })(); +} + +for(var i = 0; i < 10000; i++) { + assertEquals(42, a.g()); +} + +function outer() { + with(a) { + a.h = (function f1() { + function f2() { + function f3() { + return b; + } + return f3; + }; + return f2(); + })(); + } +}; + +outer(); + +for(var i = 0; i < 10000; i++) { + assertEquals(42, a.h()); +} diff --git a/deps/v8/test/mjsunit/regress/regress-2153.js b/deps/v8/test/mjsunit/regress/regress-2153.js new file mode 100644 index 0000000..3170042 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-2153.js @@ -0,0 +1,32 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
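Aside: regress-2071 above hammers closures created inside with-scopes through the optimizer. The language-level behaviour it must preserve is plain ES5 and fits in a few lines (print assumed from d8; sloppy mode, since with is illegal in strict code):

  var a = { b: 42 };
  var b = "outer";
  with (a) {
    // f closes over the with-scope, so its free `b` must keep resolving
    // through the with-object -- even after f is optimized or inlined.
    var f = function() { return b; };
  }
  print(f());  // 42, not "outer"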
+ +var o = {}; +o.__defineGetter__('foo', function () { return null; }); +var o = {}; +o.foo = 42; +assertEquals(42, o.foo); diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-122271.js b/deps/v8/test/mjsunit/regress/regress-crbug-122271.js index 3a99a7fa..8ae91e8 100644 --- a/deps/v8/test/mjsunit/regress/regress-crbug-122271.js +++ b/deps/v8/test/mjsunit/regress/regress-crbug-122271.js @@ -39,11 +39,11 @@ function foo(array) { array.foo = "bar"; } -assertTrue(%HasFastSmiOnlyElements(a)); -assertTrue(%HasFastElements(b)); +assertTrue(%HasFastSmiElements(a)); +assertTrue(%HasFastObjectElements(b)); foo(a); foo(b); -assertTrue(%HasFastSmiOnlyElements(a)); -assertTrue(%HasFastElements(b)); +assertTrue(%HasFastSmiElements(a)); +assertTrue(%HasFastObjectElements(b)); diff --git a/deps/v8/test/mjsunit/regress/regress-smi-only-concat.js b/deps/v8/test/mjsunit/regress/regress-smi-only-concat.js index a9a6d89..55ca299 100644 --- a/deps/v8/test/mjsunit/regress/regress-smi-only-concat.js +++ b/deps/v8/test/mjsunit/regress/regress-smi-only-concat.js @@ -33,5 +33,5 @@ var fast_array = ['a', 'b']; var array = fast_array.concat(fast_array); -assertTrue(%HasFastElements(fast_array)); -assertTrue(%HasFastElements(array)); \ No newline at end of file +assertTrue(%HasFastObjectElements(fast_array)); +assertTrue(%HasFastObjectElements(array)); diff --git a/deps/v8/test/mjsunit/regress/regress-transcendental.js b/deps/v8/test/mjsunit/regress/regress-transcendental.js new file mode 100644 index 0000000..b5dbcb4 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-transcendental.js @@ -0,0 +1,49 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Flags: --expose-gc + +// Test whether the runtime implementation and generated code of +// sine and tangens return the same results. + +function test(f, x, name) { + // Reset transcendental cache. + gc(); + // Initializing cache leads to a runtime call. + var runtime_result = f(x); + // Flush transcendental cache entries and optimize f. 
+ for (var i = 0; i < 100000; i++) f(i); + // Calculate using generated code. + var gencode_result = f(x); + print(name + " runtime function: " + runtime_result); + print(name + " generated code : " + gencode_result); + assertEquals(gencode_result, runtime_result); +} + +test(Math.tan, -1.57079632679489660000, "Math.tan"); +test(Math.sin, 6.283185307179586, "Math.sin"); + diff --git a/deps/v8/test/mjsunit/stack-traces.js b/deps/v8/test/mjsunit/stack-traces.js index 536e71b..438eec9 100644 --- a/deps/v8/test/mjsunit/stack-traces.js +++ b/deps/v8/test/mjsunit/stack-traces.js @@ -111,6 +111,18 @@ function testStrippedCustomError() { throw new CustomError("hep-hey", CustomError); } +MyObj = function() { FAIL; } + +MyObjCreator = function() {} + +MyObjCreator.prototype.Create = function() { + return new MyObj(); +} + +function testClassNames() { + (new MyObjCreator).Create(); +} + // Utility function for testing that the expected strings occur // in the stack trace produced when running the given function. function testTrace(name, fun, expected, unexpected) { @@ -254,6 +266,8 @@ testTrace("testDefaultCustomError", testDefaultCustomError, ["collectStackTrace"]); testTrace("testStrippedCustomError", testStrippedCustomError, ["hep-hey"], ["new CustomError", "collectStackTrace"]); +testTrace("testClassNames", testClassNames, + ["new MyObj", "MyObjCreator.Create"], ["as Create"]); testCallerCensorship(); testUnintendedCallerCensorship(); testErrorsDuringFormatting(); diff --git a/deps/v8/test/mjsunit/unbox-double-arrays.js b/deps/v8/test/mjsunit/unbox-double-arrays.js index fd7db28..ac03993 100644 --- a/deps/v8/test/mjsunit/unbox-double-arrays.js +++ b/deps/v8/test/mjsunit/unbox-double-arrays.js @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2012 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -278,7 +278,8 @@ function testOneArrayType(allocator) { expected_array_value(7)); %DeoptimizeFunction(test_various_loads6); - gc(); + %ClearFunctionTypeFeedback(test_various_stores); + %ClearFunctionTypeFeedback(test_various_loads7); // Test stores for non-NaN. var large_array = new allocator(large_array_size); @@ -376,7 +377,7 @@ delete large_array2[5]; // Convert back to fast elements and make sure the contents of the array are // unchanged. large_array2[25] = new Object(); -assertTrue(%HasFastElements(large_array2)); +assertTrue(%HasFastObjectElements(large_array2)); for (var i= 0; i < approx_dict_to_elements_threshold; i += 500 ) { if (i != 25 && i != 5) { assertEquals(expected_array_value(i), large_array2[i]); diff --git a/deps/v8/tools/fuzz-harness.sh b/deps/v8/tools/fuzz-harness.sh new file mode 100644 index 0000000..efbf864 --- /dev/null +++ b/deps/v8/tools/fuzz-harness.sh @@ -0,0 +1,92 @@ +#!/bin/bash +# Copyright 2012 the V8 project authors. All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# A simple harness that downloads and runs 'jsfunfuzz' against d8. This +# takes a long time because it runs many iterations and is intended for +# automated usage. The package containing 'jsfunfuzz' can be found as an +# attachment to this bug: +# https://bugzilla.mozilla.org/show_bug.cgi?id=jsfunfuzz + +JSFUNFUZZ_URL="https://bugzilla.mozilla.org/attachment.cgi?id=310631" +JSFUNFUZZ_MD5="d0e497201c5cd7bffbb1cdc1574f4e32" + +v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../) + +if [ -n "$1" ]; then + d8="${v8_root}/$1" +else + d8="${v8_root}/d8" +fi + +if [ ! -f "$d8" ]; then + echo "Failed to find d8 binary: $d8" + exit 1 +fi + +jsfunfuzz_file="$v8_root/tools/jsfunfuzz.zip" +if [ ! -f "$jsfunfuzz_file" ]; then + echo "Downloading $jsfunfuzz_file ..." + wget -q -O "$jsfunfuzz_file" $JSFUNFUZZ_URL || exit 1 +fi + +jsfunfuzz_sum=$(md5sum "$jsfunfuzz_file" | awk '{ print $1 }') +if [ $jsfunfuzz_sum != $JSFUNFUZZ_MD5 ]; then + echo "Failed to verify checksum!" + exit 1 +fi + +jsfunfuzz_dir="$v8_root/tools/jsfunfuzz" +if [ ! -d "$jsfunfuzz_dir" ]; then + echo "Unpacking into $jsfunfuzz_dir ..." + unzip "$jsfunfuzz_file" -d "$jsfunfuzz_dir" || exit 1 + echo "Patching runner ..." 
+ cat << EOF | patch -s -p0 -d "$v8_root" +--- tools/jsfunfuzz/jsfunfuzz/multi_timed_run.py~ ++++ tools/jsfunfuzz/jsfunfuzz/multi_timed_run.py +@@ -125,7 +125,7 @@ + + def many_timed_runs(): + iteration = 0 +- while True: ++ while iteration < 100: + iteration += 1 + logfilename = "w%d" % iteration + one_timed_run(logfilename) +EOF +fi + +flags='--debug-code --expose-gc --verify-gc' +python -u "$jsfunfuzz_dir/jsfunfuzz/multi_timed_run.py" 300 \ + "$d8" $flags "$jsfunfuzz_dir/jsfunfuzz/jsfunfuzz.js" +exit_code=$(cat w* | grep " looking good" -c) +exit_code=$((100-exit_code)) +tar -cjf fuzz-results-$(date +%y%m%d).tar.bz2 err-* w* +rm -f err-* w* + +echo "Total failures: $exit_code" +exit $exit_code diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py index 29d4755..4071306 100755 --- a/deps/v8/tools/grokdump.py +++ b/deps/v8/tools/grokdump.py @@ -111,18 +111,56 @@ class Descriptor(object): def do_dump(reader, heap): """Dump all available memory regions.""" def dump_region(reader, start, size, location): - print "%s - %s" % (reader.FormatIntPtr(start), - reader.FormatIntPtr(start + size)) - for slot in xrange(start, - start + size, - reader.PointerSize()): - maybe_address = reader.ReadUIntPtr(slot) - heap_object = heap.FindObject(maybe_address) - print "%s: %s" % (reader.FormatIntPtr(slot), - reader.FormatIntPtr(maybe_address)) - if heap_object: - heap_object.Print(Printer()) - print + print + while start & 3 != 0: + start += 1 + size -= 1 + location += 1 + is_executable = reader.IsProbableExecutableRegion(location, size) + is_ascii = reader.IsProbableASCIIRegion(location, size) + + if is_executable is not False: + lines = reader.GetDisasmLines(start, size) + for line in lines: + print FormatDisasmLine(start, heap, line) + print + + if is_ascii is not False: + # Output in the same format as the Unix hd command + addr = start + for slot in xrange(location, location + size, 16): + hex_line = "" + asc_line = "" + for i in xrange(0, 16): + if slot + i < location + size: + byte = ctypes.c_uint8.from_buffer(reader.minidump, slot + i).value + if byte >= 0x20 and byte < 0x7f: + asc_line += chr(byte) + else: + asc_line += "." 
+ hex_line += " %02x" % (byte) + else: + hex_line += " " + if i == 7: + hex_line += " " + print "%s %s |%s|" % (reader.FormatIntPtr(addr), + hex_line, + asc_line) + addr += 16 + + if is_executable is not True and is_ascii is not True: + print "%s - %s" % (reader.FormatIntPtr(start), + reader.FormatIntPtr(start + size)) + for slot in xrange(start, + start + size, + reader.PointerSize()): + maybe_address = reader.ReadUIntPtr(slot) + heap_object = heap.FindObject(maybe_address) + print "%s: %s" % (reader.FormatIntPtr(slot), + reader.FormatIntPtr(maybe_address)) + if heap_object: + heap_object.Print(Printer()) + print reader.ForEachMemoryRegion(dump_region) @@ -470,6 +508,64 @@ class MinidumpReader(object): elif self.arch == MD_CPU_ARCHITECTURE_X86: return ctypes.c_uint32.from_buffer(self.minidump, location).value + def IsProbableASCIIRegion(self, location, length): + ascii_bytes = 0 + non_ascii_bytes = 0 + for loc in xrange(location, location + length): + byte = ctypes.c_uint8.from_buffer(self.minidump, loc).value + if byte >= 0x7f: + non_ascii_bytes += 1 + if byte < 0x20 and byte != 0: + non_ascii_bytes += 1 + if byte < 0x7f and byte >= 0x20: + ascii_bytes += 1 + if byte == 0xa: # newline + ascii_bytes += 1 + if ascii_bytes * 10 <= length: + return False + if length > 0 and ascii_bytes > non_ascii_bytes * 7: + return True + if ascii_bytes > non_ascii_bytes * 3: + return None # Maybe + return False + + def IsProbableExecutableRegion(self, location, length): + opcode_bytes = 0 + sixty_four = self.arch == MD_CPU_ARCHITECTURE_AMD64 + for loc in xrange(location, location + length): + byte = ctypes.c_uint8.from_buffer(self.minidump, loc).value + if (byte == 0x8b or # mov + byte == 0x89 or # mov reg-reg + (byte & 0xf0) == 0x50 or # push/pop + (sixty_four and (byte & 0xf0) == 0x40) or # rex prefix + byte == 0xc3 or # return + byte == 0x74 or # jeq + byte == 0x84 or # jeq far + byte == 0x75 or # jne + byte == 0x85 or # jne far + byte == 0xe8 or # call + byte == 0xe9 or # jmp far + byte == 0xeb): # jmp near + opcode_bytes += 1 + opcode_percent = (opcode_bytes * 100) / length + threshold = 20 + if opcode_percent > threshold + 2: + return True + if opcode_percent > threshold - 2: + return None # Maybe + return False + + def FindRegion(self, addr): + answer = [-1, -1] + def is_in(reader, start, size, location): + if addr >= start and addr < start + size: + answer[0] = start + answer[1] = size + self.ForEachMemoryRegion(is_in) + if answer[0] == -1: + return None + return answer + def ForEachMemoryRegion(self, cb): if self.memory_list64 is not None: for r in self.memory_list64.ranges: @@ -1099,37 +1195,49 @@ class InspectionShell(cmd.Cmd): def AnalyzeMinidump(options, minidump_name): reader = MinidumpReader(options, minidump_name) + heap = None DebugPrint("========================================") if reader.exception is None: print "Minidump has no exception info" - return - print "Exception info:" - exception_thread = reader.thread_map[reader.exception.thread_id] - print " thread id: %d" % exception_thread.id - print " code: %08X" % reader.exception.exception.code - print " context:" - for r in CONTEXT_FOR_ARCH[reader.arch]: - print " %s: %s" % (r, reader.FormatIntPtr(reader.Register(r))) - # TODO(vitalyr): decode eflags. 
- print " eflags: %s" % bin(reader.exception_context.eflags)[2:] - print - - stack_top = reader.ExceptionSP() - stack_bottom = exception_thread.stack.start + \ - exception_thread.stack.memory.data_size - stack_map = {reader.ExceptionIP(): -1} - for slot in xrange(stack_top, stack_bottom, reader.PointerSize()): - maybe_address = reader.ReadUIntPtr(slot) - if not maybe_address in stack_map: - stack_map[maybe_address] = slot - heap = V8Heap(reader, stack_map) - - print "Disassembly around exception.eip:" - start = reader.ExceptionIP() - EIP_PROXIMITY - lines = reader.GetDisasmLines(start, 2 * EIP_PROXIMITY) - for line in lines: - print FormatDisasmLine(start, heap, line) - print + else: + print "Exception info:" + exception_thread = reader.thread_map[reader.exception.thread_id] + print " thread id: %d" % exception_thread.id + print " code: %08X" % reader.exception.exception.code + print " context:" + for r in CONTEXT_FOR_ARCH[reader.arch]: + print " %s: %s" % (r, reader.FormatIntPtr(reader.Register(r))) + # TODO(vitalyr): decode eflags. + print " eflags: %s" % bin(reader.exception_context.eflags)[2:] + print + + stack_top = reader.ExceptionSP() + stack_bottom = exception_thread.stack.start + \ + exception_thread.stack.memory.data_size + stack_map = {reader.ExceptionIP(): -1} + for slot in xrange(stack_top, stack_bottom, reader.PointerSize()): + maybe_address = reader.ReadUIntPtr(slot) + if not maybe_address in stack_map: + stack_map[maybe_address] = slot + heap = V8Heap(reader, stack_map) + + print "Disassembly around exception.eip:" + disasm_start = reader.ExceptionIP() - EIP_PROXIMITY + disasm_bytes = 2 * EIP_PROXIMITY + if (options.full): + full_range = reader.FindRegion(reader.ExceptionIP()) + if full_range is not None: + disasm_start = full_range[0] + disasm_bytes = full_range[1] + + lines = reader.GetDisasmLines(disasm_start, disasm_bytes) + + for line in lines: + print FormatDisasmLine(disasm_start, heap, line) + print + + if heap is None: + heap = V8Heap(reader, None) if options.full: do_dump(reader, heap) @@ -1137,15 +1245,16 @@ def AnalyzeMinidump(options, minidump_name): if options.shell: InspectionShell(reader, heap).cmdloop("type help to get help") else: - print "Annotated stack (from exception.esp to bottom):" - for slot in xrange(stack_top, stack_bottom, reader.PointerSize()): - maybe_address = reader.ReadUIntPtr(slot) - heap_object = heap.FindObject(maybe_address) - print "%s: %s" % (reader.FormatIntPtr(slot), - reader.FormatIntPtr(maybe_address)) - if heap_object: - heap_object.Print(Printer()) - print + if reader.exception is not None: + print "Annotated stack (from exception.esp to bottom):" + for slot in xrange(stack_top, stack_bottom, reader.PointerSize()): + maybe_address = reader.ReadUIntPtr(slot) + heap_object = heap.FindObject(maybe_address) + print "%s: %s" % (reader.FormatIntPtr(slot), + reader.FormatIntPtr(maybe_address)) + if heap_object: + heap_object.Print(Printer()) + print reader.Dispose() diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp index 46f85fe..ea82d31 100644 --- a/deps/v8/tools/gyp/v8.gyp +++ b/deps/v8/tools/gyp/v8.gyp @@ -58,31 +58,22 @@ # has some sources to link into the component. 
'../../src/v8dll-main.cc', ], + 'defines': [ + 'V8_SHARED', + 'BUILDING_V8_SHARED', + ], + 'direct_dependent_settings': { + 'defines': [ + 'V8_SHARED', + 'USING_V8_SHARED', + ], + }, 'conditions': [ ['OS=="mac"', { 'xcode_settings': { 'OTHER_LDFLAGS': ['-dynamiclib', '-all_load'] }, }], - ['OS=="win"', { - 'defines': [ - 'BUILDING_V8_SHARED', - ], - 'direct_dependent_settings': { - 'defines': [ - 'USING_V8_SHARED', - ], - }, - }, { - 'defines': [ - 'V8_SHARED', - ], - 'direct_dependent_settings': { - 'defines': [ - 'V8_SHARED', - ], - }, - }], ['soname_version!=""', { 'product_extension': 'so.<(soname_version)', }], @@ -110,27 +101,16 @@ 'dependencies': ['mksnapshot', 'js2c'], }], ['component=="shared_library"', { - 'conditions': [ - ['OS=="win"', { - 'defines': [ - 'BUILDING_V8_SHARED', - ], - 'direct_dependent_settings': { - 'defines': [ - 'USING_V8_SHARED', - ], - }, - }, { - 'defines': [ - 'V8_SHARED', - ], - 'direct_dependent_settings': { - 'defines': [ - 'V8_SHARED', - ], - }, - }], + 'defines': [ + 'V8_SHARED', + 'BUILDING_V8_SHARED', ], + 'direct_dependent_settings': { + 'defines': [ + 'V8_SHARED', + 'USING_V8_SHARED', + ], + }, }], ], 'dependencies': [ @@ -315,6 +295,8 @@ '../../src/dtoa.h', '../../src/elements.cc', '../../src/elements.h', + '../../src/elements-kind.cc', + '../../src/elements-kind.h', '../../src/execution.cc', '../../src/execution.h', '../../src/factory.cc', diff --git a/deps/v8/tools/js2c.py b/deps/v8/tools/js2c.py index fa559f3..d06cbe4 100644 --- a/deps/v8/tools/js2c.py +++ b/deps/v8/tools/js2c.py @@ -1,6 +1,6 @@ #!/usr/bin/env python # -# Copyright 2006-2008 the V8 project authors. All rights reserved. +# Copyright 2012 the V8 project authors. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: @@ -195,14 +195,14 @@ def ReadMacros(lines): macro_match = MACRO_PATTERN.match(line) if macro_match: name = macro_match.group(1) - args = map(string.strip, macro_match.group(2).split(',')) + args = [match.strip() for match in macro_match.group(2).split(',')] body = macro_match.group(3).strip() macros.append((re.compile("\\b%s\\(" % name), TextMacro(args, body))) else: python_match = PYTHON_MACRO_PATTERN.match(line) if python_match: name = python_match.group(1) - args = map(string.strip, python_match.group(2).split(',')) + args = [match.strip() for match in python_match.group(2).split(',')] body = python_match.group(3).strip() fun = eval("lambda " + ",".join(args) + ': ' + body) macros.append((re.compile("\\b%s\\(" % name), PythonMacro(args, fun))) diff --git a/deps/v8/tools/jsmin.py b/deps/v8/tools/jsmin.py index e82f3d0..250dea9 100644 --- a/deps/v8/tools/jsmin.py +++ b/deps/v8/tools/jsmin.py @@ -1,6 +1,6 @@ #!/usr/bin/python2.4 -# Copyright 2009 the V8 project authors. All rights reserved. +# Copyright 2012 the V8 project authors. All rights reserved. 
# Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: @@ -154,7 +154,7 @@ class JavaScriptMinifier(object): return var_name while True: identifier_first_char = self.identifier_counter % 52 - identifier_second_char = self.identifier_counter / 52 + identifier_second_char = self.identifier_counter // 52 new_identifier = self.CharFromNumber(identifier_first_char) if identifier_second_char != 0: new_identifier = ( diff --git a/deps/v8/tools/test-wrapper-gypbuild.py b/deps/v8/tools/test-wrapper-gypbuild.py index eda2459..27b5a35 100755 --- a/deps/v8/tools/test-wrapper-gypbuild.py +++ b/deps/v8/tools/test-wrapper-gypbuild.py @@ -224,7 +224,8 @@ def Main(): print ">>> running presubmit tests" returncodes += subprocess.call([workspace + '/tools/presubmit.py']) - args_for_children = [workspace + '/tools/test.py'] + PassOnOptions(options) + args_for_children = ['python'] + args_for_children += [workspace + '/tools/test.py'] + PassOnOptions(options) args_for_children += ['--no-build', '--build-system=gyp'] for arg in args: args_for_children += [arg] @@ -240,10 +241,14 @@ def Main(): shellpath = workspace + '/' + options.outdir + '/' + arch + '.' + mode env['LD_LIBRARY_PATH'] = shellpath + '/lib.target' shell = shellpath + "/d8" - child = subprocess.Popen(' '.join(args_for_children + - ['--arch=' + arch] + - ['--mode=' + mode] + - ['--shell=' + shell]), + cmdline = ' '.join(args_for_children + + ['--arch=' + arch] + + ['--mode=' + mode] + + ['--shell=' + shell]) + # TODO(jkummerow): This print is temporary. + print "Executing: %s" % cmdline + + child = subprocess.Popen(cmdline, shell=True, cwd=workspace, env=env) -- 2.7.4