Updated V8 from git://github.com/v8/v8.git to 3e6ec7e018bbf2c63ef04b85ff688198ea204c04
author Peter Varga <pvarga@inf.u-szeged.hu>
Wed, 13 Jun 2012 07:21:17 +0000 (09:21 +0200)
committer Qt by Nokia <qt-info@nokia.com>
Wed, 13 Jun 2012 07:55:50 +0000 (09:55 +0200)
Update V8 source to version 3.11.4

* Performance and stability improvements on all platforms.
* Fixed native ARM build (issues 1744, 539).
* Fixed several bugs in heap profiles (including issue 2078).
* Throw syntax errors on illegal escape sequences (see the sketch after
  this list).
* Made handling of const more consistent when combined with 'eval' and
  'with'.
* Fixed V8 on MinGW-x64 (issue 2026).
* Put new global var semantics behind a flag until WebKit tests are
  cleaned up.
* Enabled inlining of some V8 API functions.
* Enabled MIPS cross-compilation.
* Implemented clearing of CompareICs (issue 2102).
* Fixed Python deprecations (issue 1391).
* Fixed GCC 4.7 (C++11) compilation (issue 2136).
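
To make the escape-sequence change concrete, here is a minimal embedding
sketch against the 3.11-era API (Script::Compile and TryCatch as declared
in the updated v8.h below; the unterminated '\x' escape is just one
example of an illegal sequence):

    #include <cstdio>
    #include <v8.h>

    int main() {
      v8::HandleScope scope;
      v8::Persistent<v8::Context> context = v8::Context::New();
      v8::Context::Scope context_scope(context);
      v8::TryCatch try_catch;
      // '\x' must be followed by two hex digits, so this literal is an
      // illegal escape; V8 now rejects it at compile time instead of
      // silently reading it as the character 'x'.
      v8::Local<v8::Script> script =
          v8::Script::Compile(v8::String::New("var s = '\\x';"));
      if (script.IsEmpty()) {
        v8::String::Utf8Value error(try_catch.Exception());
        std::printf("%s\n", *error);  // expected: a SyntaxError message
      }
      context.Dispose();
      return 0;
    }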

Change-Id: I72594bd22356391dd55e315c022d0c9f3fd5b451
Reviewed-by: Kent Hansen <kent.hansen@nokia.com>
267 files changed:
src/3rdparty/v8/ChangeLog
src/3rdparty/v8/DEPS [new file with mode: 0644]
src/3rdparty/v8/Makefile
src/3rdparty/v8/SConstruct
src/3rdparty/v8/build/armu.gypi [deleted file]
src/3rdparty/v8/build/common.gypi
src/3rdparty/v8/build/gyp_v8
src/3rdparty/v8/build/mipsu.gypi [deleted file]
src/3rdparty/v8/build/standalone.gypi
src/3rdparty/v8/include/v8-debug.h [changed mode: 0644->0755]
src/3rdparty/v8/include/v8-profiler.h
src/3rdparty/v8/include/v8.h
src/3rdparty/v8/samples/lineprocessor.cc
src/3rdparty/v8/samples/samples.gyp
src/3rdparty/v8/src/api.cc
src/3rdparty/v8/src/apiutils.h
src/3rdparty/v8/src/arguments.h
src/3rdparty/v8/src/arm/code-stubs-arm.cc
src/3rdparty/v8/src/arm/cpu-arm.cc
src/3rdparty/v8/src/arm/debug-arm.cc
src/3rdparty/v8/src/arm/deoptimizer-arm.cc
src/3rdparty/v8/src/arm/full-codegen-arm.cc
src/3rdparty/v8/src/arm/ic-arm.cc
src/3rdparty/v8/src/arm/lithium-arm.cc
src/3rdparty/v8/src/arm/lithium-arm.h
src/3rdparty/v8/src/arm/lithium-codegen-arm.cc
src/3rdparty/v8/src/arm/lithium-codegen-arm.h
src/3rdparty/v8/src/arm/macro-assembler-arm.cc
src/3rdparty/v8/src/arm/macro-assembler-arm.h
src/3rdparty/v8/src/arm/stub-cache-arm.cc
src/3rdparty/v8/src/array.js
src/3rdparty/v8/src/assembler.h
src/3rdparty/v8/src/ast.cc
src/3rdparty/v8/src/ast.h
src/3rdparty/v8/src/atomicops.h
src/3rdparty/v8/src/atomicops_internals_arm_qnx.h [deleted file]
src/3rdparty/v8/src/bootstrapper.cc
src/3rdparty/v8/src/builtins.cc
src/3rdparty/v8/src/builtins.h
src/3rdparty/v8/src/code-stubs.cc
src/3rdparty/v8/src/code-stubs.h
src/3rdparty/v8/src/compiler-intrinsics.h
src/3rdparty/v8/src/compiler.cc
src/3rdparty/v8/src/compiler.h
src/3rdparty/v8/src/contexts.cc
src/3rdparty/v8/src/contexts.h
src/3rdparty/v8/src/d8.cc
src/3rdparty/v8/src/d8.h
src/3rdparty/v8/src/debug-agent.cc
src/3rdparty/v8/src/debug-debugger.js
src/3rdparty/v8/src/debug.cc
src/3rdparty/v8/src/debug.h
src/3rdparty/v8/src/double.h
src/3rdparty/v8/src/elements.cc
src/3rdparty/v8/src/elements.h
src/3rdparty/v8/src/execution.cc
src/3rdparty/v8/src/execution.h
src/3rdparty/v8/src/factory.cc
src/3rdparty/v8/src/factory.h
src/3rdparty/v8/src/flag-definitions.h
src/3rdparty/v8/src/frames.cc
src/3rdparty/v8/src/frames.h
src/3rdparty/v8/src/full-codegen.cc
src/3rdparty/v8/src/full-codegen.h
src/3rdparty/v8/src/globals.h
src/3rdparty/v8/src/handles.cc
src/3rdparty/v8/src/hashmap.h
src/3rdparty/v8/src/heap-inl.h
src/3rdparty/v8/src/heap-profiler.cc
src/3rdparty/v8/src/heap-profiler.h
src/3rdparty/v8/src/heap.cc
src/3rdparty/v8/src/heap.h
src/3rdparty/v8/src/hydrogen-instructions.cc
src/3rdparty/v8/src/hydrogen-instructions.h
src/3rdparty/v8/src/hydrogen.cc
src/3rdparty/v8/src/hydrogen.h
src/3rdparty/v8/src/ia32/assembler-ia32.h
src/3rdparty/v8/src/ia32/builtins-ia32.cc
src/3rdparty/v8/src/ia32/code-stubs-ia32.cc
src/3rdparty/v8/src/ia32/codegen-ia32.cc
src/3rdparty/v8/src/ia32/debug-ia32.cc
src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc
src/3rdparty/v8/src/ia32/full-codegen-ia32.cc
src/3rdparty/v8/src/ia32/ic-ia32.cc
src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc
src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h
src/3rdparty/v8/src/ia32/lithium-ia32.cc
src/3rdparty/v8/src/ia32/lithium-ia32.h
src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc
src/3rdparty/v8/src/ia32/macro-assembler-ia32.h
src/3rdparty/v8/src/ia32/stub-cache-ia32.cc
src/3rdparty/v8/src/ic.cc
src/3rdparty/v8/src/ic.h
src/3rdparty/v8/src/incremental-marking-inl.h
src/3rdparty/v8/src/incremental-marking.cc
src/3rdparty/v8/src/incremental-marking.h
src/3rdparty/v8/src/interface.cc
src/3rdparty/v8/src/interface.h
src/3rdparty/v8/src/isolate.cc
src/3rdparty/v8/src/isolate.h
src/3rdparty/v8/src/jsregexp.cc
src/3rdparty/v8/src/jsregexp.h
src/3rdparty/v8/src/lazy-instance.h
src/3rdparty/v8/src/list-inl.h
src/3rdparty/v8/src/list.h
src/3rdparty/v8/src/lithium-allocator.cc
src/3rdparty/v8/src/liveedit-debugger.js
src/3rdparty/v8/src/liveedit.cc
src/3rdparty/v8/src/macros.py
src/3rdparty/v8/src/mark-compact-inl.h
src/3rdparty/v8/src/mark-compact.cc
src/3rdparty/v8/src/mark-compact.h
src/3rdparty/v8/src/math.js
src/3rdparty/v8/src/messages.js
src/3rdparty/v8/src/mips/assembler-mips.cc
src/3rdparty/v8/src/mips/assembler-mips.h
src/3rdparty/v8/src/mips/code-stubs-mips.cc
src/3rdparty/v8/src/mips/constants-mips.h
src/3rdparty/v8/src/mips/debug-mips.cc
src/3rdparty/v8/src/mips/deoptimizer-mips.cc
src/3rdparty/v8/src/mips/full-codegen-mips.cc
src/3rdparty/v8/src/mips/ic-mips.cc
src/3rdparty/v8/src/mips/lithium-codegen-mips.cc
src/3rdparty/v8/src/mips/lithium-codegen-mips.h
src/3rdparty/v8/src/mips/lithium-mips.cc
src/3rdparty/v8/src/mips/lithium-mips.h
src/3rdparty/v8/src/mips/macro-assembler-mips.cc
src/3rdparty/v8/src/mips/macro-assembler-mips.h
src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.cc
src/3rdparty/v8/src/mips/stub-cache-mips.cc
src/3rdparty/v8/src/mirror-debugger.js
src/3rdparty/v8/src/objects-debug.cc
src/3rdparty/v8/src/objects-inl.h
src/3rdparty/v8/src/objects-printer.cc
src/3rdparty/v8/src/objects-visiting-inl.h
src/3rdparty/v8/src/objects-visiting.cc
src/3rdparty/v8/src/objects-visiting.h
src/3rdparty/v8/src/objects.cc
src/3rdparty/v8/src/objects.h
src/3rdparty/v8/src/parser.cc
src/3rdparty/v8/src/platform-cygwin.cc
src/3rdparty/v8/src/platform-freebsd.cc
src/3rdparty/v8/src/platform-linux.cc
src/3rdparty/v8/src/platform-macos.cc
src/3rdparty/v8/src/platform-nullos.cc
src/3rdparty/v8/src/platform-openbsd.cc
src/3rdparty/v8/src/platform-posix.cc
src/3rdparty/v8/src/platform-qnx.cc [deleted file]
src/3rdparty/v8/src/platform-solaris.cc
src/3rdparty/v8/src/platform-win32.cc
src/3rdparty/v8/src/platform.h
src/3rdparty/v8/src/preparser.cc
src/3rdparty/v8/src/preparser.h
src/3rdparty/v8/src/prettyprinter.cc
src/3rdparty/v8/src/profile-generator-inl.h
src/3rdparty/v8/src/profile-generator.cc
src/3rdparty/v8/src/profile-generator.h
src/3rdparty/v8/src/property.h
src/3rdparty/v8/src/regexp.js
src/3rdparty/v8/src/runtime.cc
src/3rdparty/v8/src/runtime.h
src/3rdparty/v8/src/runtime.js
src/3rdparty/v8/src/scanner.cc
src/3rdparty/v8/src/scanner.h
src/3rdparty/v8/src/scopeinfo.cc
src/3rdparty/v8/src/scopes.cc
src/3rdparty/v8/src/scopes.h
src/3rdparty/v8/src/serialize.cc
src/3rdparty/v8/src/spaces-inl.h
src/3rdparty/v8/src/spaces.cc
src/3rdparty/v8/src/spaces.h
src/3rdparty/v8/src/string.js
src/3rdparty/v8/src/stub-cache.cc
src/3rdparty/v8/src/stub-cache.h
src/3rdparty/v8/src/utils.cc
src/3rdparty/v8/src/utils.h
src/3rdparty/v8/src/v8.cc
src/3rdparty/v8/src/v8globals.h
src/3rdparty/v8/src/v8utils.h
src/3rdparty/v8/src/variables.cc
src/3rdparty/v8/src/variables.h
src/3rdparty/v8/src/version.cc
src/3rdparty/v8/src/x64/assembler-x64.h
src/3rdparty/v8/src/x64/code-stubs-x64.cc
src/3rdparty/v8/src/x64/debug-x64.cc
src/3rdparty/v8/src/x64/deoptimizer-x64.cc
src/3rdparty/v8/src/x64/full-codegen-x64.cc
src/3rdparty/v8/src/x64/ic-x64.cc
src/3rdparty/v8/src/x64/lithium-codegen-x64.cc
src/3rdparty/v8/src/x64/lithium-codegen-x64.h
src/3rdparty/v8/src/x64/lithium-x64.cc
src/3rdparty/v8/src/x64/lithium-x64.h
src/3rdparty/v8/src/x64/macro-assembler-x64.cc
src/3rdparty/v8/src/x64/macro-assembler-x64.h
src/3rdparty/v8/src/x64/stub-cache-x64.cc
src/3rdparty/v8/test/cctest/test-accessors.cc
src/3rdparty/v8/test/cctest/test-alloc.cc
src/3rdparty/v8/test/cctest/test-api.cc
src/3rdparty/v8/test/cctest/test-debug.cc
src/3rdparty/v8/test/cctest/test-decls.cc
src/3rdparty/v8/test/cctest/test-double.cc
src/3rdparty/v8/test/cctest/test-heap-profiler.cc
src/3rdparty/v8/test/cctest/test-heap.cc
src/3rdparty/v8/test/cctest/test-list.cc
src/3rdparty/v8/test/cctest/test-mark-compact.cc
src/3rdparty/v8/test/cctest/test-regexp.cc
src/3rdparty/v8/test/cctest/test-strings.cc
src/3rdparty/v8/test/cctest/test-weakmaps.cc
src/3rdparty/v8/test/cctest/testcfg.py
src/3rdparty/v8/test/mjsunit/array-bounds-check-removal.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/big-array-literal.js
src/3rdparty/v8/test/mjsunit/compiler/alloc-object-huge.js
src/3rdparty/v8/test/mjsunit/compiler/inline-arguments.js
src/3rdparty/v8/test/mjsunit/compiler/inline-construct.js
src/3rdparty/v8/test/mjsunit/compiler/literals.js
src/3rdparty/v8/test/mjsunit/compiler/optimize-bitnot.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js
src/3rdparty/v8/test/mjsunit/debug-evaluate-locals-optimized.js
src/3rdparty/v8/test/mjsunit/debug-function-scopes.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/debug-liveedit-stack-padding.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/debug-scripts-request.js
src/3rdparty/v8/test/mjsunit/debug-stepin-builtin-callback.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/declare-locally.js
src/3rdparty/v8/test/mjsunit/error-constructors.js
src/3rdparty/v8/test/mjsunit/harmony/debug-function-scopes.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/harmony/module-linking.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/harmony/module-parsing.js
src/3rdparty/v8/test/mjsunit/harmony/module-resolution.js
src/3rdparty/v8/test/mjsunit/math-floor-of-div.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/mjsunit.js
src/3rdparty/v8/test/mjsunit/mjsunit.status
src/3rdparty/v8/test/mjsunit/regexp-capture-3.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/regress/regress-1119.js
src/3rdparty/v8/test/mjsunit/regress/regress-115452.js
src/3rdparty/v8/test/mjsunit/regress/regress-1170.js
src/3rdparty/v8/test/mjsunit/regress/regress-117409.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/regress/regress-119609.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/regress/regress-1217.js
src/3rdparty/v8/test/mjsunit/regress/regress-123512.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/regress/regress-123919.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/regress/regress-124594.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/regress/regress-125515.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/regress/regress-126412.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/regress/regress-128018.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/regress/regress-128146.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/regress/regress-1639-2.js
src/3rdparty/v8/test/mjsunit/regress/regress-1639.js
src/3rdparty/v8/test/mjsunit/regress/regress-2071.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/regress/regress-2110.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/regress/regress-crbug-122271.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/regress/regress-crbug-126414.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/regress/regress-fast-literal-transition.js [new file with mode: 0644]
src/3rdparty/v8/test/mjsunit/regress/regress-transcendental.js [new file with mode: 0644]
src/3rdparty/v8/test/mozilla/mozilla.status
src/3rdparty/v8/test/sputnik/sputnik.status
src/3rdparty/v8/test/test262/README
src/3rdparty/v8/test/test262/test262.status
src/3rdparty/v8/test/test262/testcfg.py
src/3rdparty/v8/tools/check-static-initializers.sh
src/3rdparty/v8/tools/common-includes.sh
src/3rdparty/v8/tools/grokdump.py
src/3rdparty/v8/tools/gyp/v8.gyp
src/3rdparty/v8/tools/js2c.py
src/3rdparty/v8/tools/jsmin.py
src/3rdparty/v8/tools/presubmit.py
src/3rdparty/v8/tools/push-to-trunk.sh
src/3rdparty/v8/tools/test-wrapper-gypbuild.py

diff --git a/src/3rdparty/v8/ChangeLog b/src/3rdparty/v8/ChangeLog
index 25eaf56..97dac40 100644 (file)
@@ -1,3 +1,193 @@
+2012-05-22: Version 3.11.4
+
+        Some cleanup to common.gypi. This fixes some host/target combinations
+        that weren't working in the Make build on Mac.
+
+        Handle EINTR in socket functions and continue incomplete sends.
+        (issue 2098)
+
+        Fixed python deprecations.  (issue 1391)
+
+        Made socket send and receive more robust and return 0 on failure.
+        (Chromium issue 15719)
+
+        Fixed GCC 4.7 (C++11) compilation.  (issue 2136)
+
+        Set '-m32' option for host and target platforms
+
+        Performance and stability improvements on all platforms.
+
+
+2012-05-18: Version 3.11.3
+
+        Disable optimization for functions that have scopes that cannot be
+        reconstructed from the context chain. (issue 2071)
+
+        Define V8_EXPORT to nothing for clients of v8. (Chromium issue 90078)
+
+        Correctly check for native error objects.  (Chromium issue 2138)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-05-16: Version 3.11.2
+
+        Revert r11496. (Chromium issue 128146)
+
+        Implement map collection for incremental marking. (issue 1465)
+
+        Add toString method to CallSite (which describes a frame of the
+        stack trace).
+
+
+2012-05-15: Version 3.11.1
+
+        Added a readbuffer function to d8 that reads a file into an ArrayBuffer.
+
+        Fix freebsd build. (V8 issue 2126)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-05-11: Version 3.11.0
+
+        Fixed compose-discard crasher from r11524 (issue 2123).
+
+        Activated new global semantics by default. Global variables can
+        now shadow properties of the global object (ES5.1 erratum).
+
+        Properly set ElementsKind of empty FAST_DOUBLE_ELEMENTS arrays when
+        transitioning (Chromium issue 117409).
+
+        Made Error.prototype.name writable again, as required by the spec and
+        the web (Chromium issue 69187).
+
+        Implemented map collection with incremental marking (issue 1465).
+
+        Regexp: Fixed overflow in min-match-length calculation
+        (Chromium issue 126412).
+
+        MIPS: Fixed illegal instruction use on Loongson in code for
+        Math.random() (issue 2115).
+
+        Fixed crash bug in VisitChoice (Chromium issue 126272).
+
+        Fixed unsigned-Smi check in MappedArgumentsLookup
+        (Chromium issue 126414).
+
+        Fixed LiveEdit for function with no locals (issue 825).
+
+        Fixed register clobbering in LoadIC for interceptors
+        (Chromium issue 125988).
+
+        Implemented clearing of CompareICs (issue 2102).
+
+        Performance and stability improvements on all platforms.
+
+
+2012-05-03: Version 3.10.8
+
+        Enabled MIPS cross-compilation.
+
+        Ensured reload of elements pointer in StoreFastDoubleElement stub.
+        (Chromium issue 125515)
+
+        Fixed corner cases in truncation behavior when storing to
+        TypedArrays. (issue 2110)
+
+        Fixed failure to properly recognize and report out-of-memory
+        conditions when allocating code space pages. (Chromium issue
+        118625)
+
+        Fixed idle notifications to perform a round of incremental GCs
+        after context disposal. (issue 2107)
+
+        Fixed preparser for try statement. (issue 2109)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-04-30: Version 3.10.7
+
+        Performance and stability improvements on all platforms.
+
+
+2012-04-26: Version 3.10.6
+
+        Fixed some bugs in accessing details of the last regexp match.
+
+        Fixed source property of empty RegExp objects. (issue 1982)
+
+        Enabled inlining some V8 API functions.
+
+        Performance and stability improvements on all platforms.
+
+
+2012-04-23: Version 3.10.5
+
+        Put new global var semantics behind a flag until WebKit tests are
+        cleaned up.
+
+        Enabled stepping into callback passed to builtins.
+        (Chromium issue 109564)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-04-19: Version 3.10.4
+
+        Fixed issues when stressing compaction with WeakMaps.
+
+        Fixed missing GVN flag for new-space promotion. (Chromium issue 123919)
+
+        Simplify invocation sequence at monomorphic function invocation sites.
+        (issue 2079)
+
+        Performance and stability improvements on all platforms.
+
+
+2012-04-17: Version 3.10.3
+
+        Fixed several bugs in heap profiles (including issue 2078).
+
+        Throw syntax errors on illegal escape sequences.
+
+        Implemented rudimentary module linking (behind --harmony flag)
+
+        Implemented ES5 erratum: Global declarations should shadow
+        inherited properties.
+
+        Made handling of const more consistent when combined with 'eval'
+        and 'with'.
+
+        Fixed V8 on MinGW-x64 (issue 2026).
+
+        Performance and stability improvements on all platforms.
+
+
+2012-04-13: Version 3.10.2
+
+        Fixed native ARM build (issues 1744, 539)
+
+        Return LOOKUP variable instead of CONTEXT for non-context allocated
+        outer scope parameters (Chromium issue 119609).
+
+        Fixed regular and ElementsKind transitions interfering with each other
+        (Chromium issue 122271).
+
+        Improved performance of keyed loads/stores which have a HeapNumber
+        index (issues 1388, 1295).
+
+        Fixed WeakMap processing for evacuation candidates (issue 2060).
+
+        Bailout on possible direct eval calls (Chromium issue 122681).
+
+        Do not assume that names of function expressions are context-allocated
+        (issue 2051).
+
+        Performance and stability improvements on all platforms.
+
+
 2012-04-10: Version 3.10.1
 
         Fixed bug with arguments object in inlined functions (issue 2045).
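
The 3.11.0 entry above ("Activated new global semantics by default") is
the ES5.1 erratum on global 'var' bindings: a top-level declaration now
creates an own property of the global object even when a conflicting
read-only property is inherited from the prototype chain. A hedged sketch
of the observable difference, using the same era API (the property name
'x' and the values are illustrative only):

    #include <cstdio>
    #include <v8.h>

    int main() {
      v8::HandleScope scope;
      v8::Persistent<v8::Context> context = v8::Context::New();
      v8::Context::Scope context_scope(context);
      v8::Local<v8::Script> script = v8::Script::Compile(v8::String::New(
          "Object.defineProperty(Object.prototype, 'x',"
          "                      { value: 1, writable: false });"
          "var x = 2;"  // pre-3.11: shadowing blocked, x stays 1
          "x;"));
      // With the new semantics the script evaluates to 2.
      v8::Local<v8::Value> result = script->Run();
      std::printf("x = %d\n", result->Int32Value());
      context.Dispose();
      return 0;
    }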
diff --git a/src/3rdparty/v8/DEPS b/src/3rdparty/v8/DEPS
new file mode 100644 (file)
index 0000000..e50d1d2
--- /dev/null
@@ -0,0 +1,27 @@
+# Note: The buildbots evaluate this file with CWD set to the parent
+# directory and assume that the root of the checkout is in ./v8/, so
+# all paths in here must match this assumption.
+
+deps = {
+  # Remember to keep the revision in sync with the Makefile.
+  "v8/build/gyp":
+    "http://gyp.googlecode.com/svn/trunk@1282",
+}
+
+deps_os = {
+  "win": {
+    "v8/third_party/cygwin":
+      "http://src.chromium.org/svn/trunk/deps/third_party/cygwin@66844",
+
+    "v8/third_party/python_26":
+      "http://src.chromium.org/svn/trunk/tools/third_party/python_26@89111",
+  }
+}
+
+hooks = [
+  {
+    # A change to a .gyp, .gypi, or to GYP itself should run the generator.
+    "pattern": ".",
+    "action": ["python", "v8/build/gyp_v8"],
+  },
+]
diff --git a/src/3rdparty/v8/Makefile b/src/3rdparty/v8/Makefile
index da1d688..0d825c0 100644 (file)
@@ -137,6 +137,12 @@ ENVFILE = $(OUTDIR)/environment
 # Target definitions. "all" is the default.
 all: $(MODES)
 
+# Special target for the buildbots to use. Depends on $(OUTDIR)/Makefile
+# having been created before.
+buildbot:
+       $(MAKE) -C "$(OUTDIR)" BUILDTYPE=$(BUILDTYPE) \
+               builddir="$(abspath $(OUTDIR))/$(BUILDTYPE)"
+
 # Compile targets. MODES and ARCHES are convenience targets.
 .SECONDEXPANSION:
 $(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
@@ -144,21 +150,21 @@ $(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
 $(ARCHES): $(addprefix $$@.,$(MODES))
 
 # Defines how to build a particular target (e.g. ia32.release).
-$(BUILDS): $(OUTDIR)/Makefile-$$(basename $$@)
-       @$(MAKE) -C "$(OUTDIR)" -f Makefile-$(basename $@) \
+$(BUILDS): $(OUTDIR)/Makefile.$$(basename $$@)
+       @$(MAKE) -C "$(OUTDIR)" -f Makefile.$(basename $@) \
                 CXX="$(CXX)" LINK="$(LINK)" \
                 BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
                             python -c "print raw_input().capitalize()") \
                 builddir="$(shell pwd)/$(OUTDIR)/$@"
 
-native: $(OUTDIR)/Makefile-native
-       @$(MAKE) -C "$(OUTDIR)" -f Makefile-native \
+native: $(OUTDIR)/Makefile.native
+       @$(MAKE) -C "$(OUTDIR)" -f Makefile.native \
                 CXX="$(CXX)" LINK="$(LINK)" BUILDTYPE=Release \
                 builddir="$(shell pwd)/$(OUTDIR)/$@"
 
 # TODO(jkummerow): add "android.debug" when we need it.
-android android.release: $(OUTDIR)/Makefile-android
-       @$(MAKE) -C "$(OUTDIR)" -f Makefile-android \
+android android.release: $(OUTDIR)/Makefile.android
+       @$(MAKE) -C "$(OUTDIR)" -f Makefile.android \
                CXX="$(ANDROID_TOOL_PREFIX)-g++" \
                AR="$(ANDROID_TOOL_PREFIX)-ar" \
                RANLIB="$(ANDROID_TOOL_PREFIX)-ranlib" \
@@ -191,61 +197,41 @@ native.check: native
            --arch-and-mode=. $(TESTFLAGS)
 
 # Clean targets. You can clean each architecture individually, or everything.
-$(addsuffix .clean,$(ARCHES)):
-       rm -f $(OUTDIR)/Makefile-$(basename $@)
+$(addsuffix .clean,$(ARCHES)) android.clean:
+       rm -f $(OUTDIR)/Makefile.$(basename $@)
        rm -rf $(OUTDIR)/$(basename $@).release
        rm -rf $(OUTDIR)/$(basename $@).debug
-       find $(OUTDIR) -regex '.*\(host\|target\)-$(basename $@)\.mk' -delete
+       find $(OUTDIR) -regex '.*\(host\|target\).$(basename $@)\.mk' -delete
 
 native.clean:
-       rm -f $(OUTDIR)/Makefile-native
+       rm -f $(OUTDIR)/Makefile.native
        rm -rf $(OUTDIR)/native
-       find $(OUTDIR) -regex '.*\(host\|target\)-native\.mk' -delete
-
-android.clean:
-       rm -f $(OUTDIR)/Makefile-android
-       rm -rf $(OUTDIR)/android.release
-       find $(OUTDIR) -regex '.*\(host\|target\)-android\.mk' -delete
+       find $(OUTDIR) -regex '.*\(host\|target\).native\.mk' -delete
 
-clean: $(addsuffix .clean,$(ARCHES)) native.clean
+clean: $(addsuffix .clean,$(ARCHES)) native.clean android.clean
 
 # GYP file generation targets.
-$(OUTDIR)/Makefile-ia32: $(GYPFILES) $(ENVFILE)
-       GYP_GENERATORS=make \
-       build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-                     -Ibuild/standalone.gypi --depth=. -Dtarget_arch=ia32 \
-                     -S-ia32 $(GYPFLAGS)
-
-$(OUTDIR)/Makefile-x64: $(GYPFILES) $(ENVFILE)
-       GYP_GENERATORS=make \
-       build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-                     -Ibuild/standalone.gypi --depth=. -Dtarget_arch=x64 \
-                     -S-x64 $(GYPFLAGS)
-
-$(OUTDIR)/Makefile-arm: $(GYPFILES) $(ENVFILE) build/armu.gypi
-       GYP_GENERATORS=make \
-       build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-                     -Ibuild/standalone.gypi --depth=. -Ibuild/armu.gypi \
-                     -S-arm $(GYPFLAGS)
-
-$(OUTDIR)/Makefile-mips: $(GYPFILES) $(ENVFILE) build/mipsu.gypi
+MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ARCHES))
+$(MAKEFILES): $(GYPFILES) $(ENVFILE)
        GYP_GENERATORS=make \
        build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-                     -Ibuild/standalone.gypi --depth=. -Ibuild/mipsu.gypi \
-                     -S-mips $(GYPFLAGS)
+                     -Ibuild/standalone.gypi --depth=. \
+                     -Dv8_target_arch=$(subst .,,$(suffix $@)) \
+                     -S.$(subst .,,$(suffix $@)) $(GYPFLAGS)
 
-$(OUTDIR)/Makefile-native: $(GYPFILES) $(ENVFILE)
+$(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
        GYP_GENERATORS=make \
        build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-                     -Ibuild/standalone.gypi --depth=. -S-native $(GYPFLAGS)
+                     -Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)
 
-$(OUTDIR)/Makefile-android: $(GYPFILES) $(ENVFILE) build/android.gypi \
+$(OUTDIR)/Makefile.android: $(GYPFILES) $(ENVFILE) build/android.gypi \
                             must-set-ANDROID_NDK_ROOT
        GYP_GENERATORS=make \
        CC="${ANDROID_TOOL_PREFIX}-gcc" \
+       CXX="${ANDROID_TOOL_PREFIX}-g++" \
        build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
                      -Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
-                     -S-android $(GYPFLAGS)
+                     -S.android $(GYPFLAGS)
 
 must-set-ANDROID_NDK_ROOT:
 ifndef ANDROID_NDK_ROOT
@@ -261,7 +247,8 @@ $(ENVFILE): $(ENVFILE).new
 
 # Stores current GYPFLAGS in a file.
 $(ENVFILE).new:
-       @mkdir -p $(OUTDIR); echo "GYPFLAGS=$(GYPFLAGS)" > $(ENVFILE).new;
+       @mkdir -p $(OUTDIR); echo "GYPFLAGS=$(GYPFLAGS)" > $(ENVFILE).new; \
+           echo "CXX=$(CXX)" >> $(ENVFILE).new
 
 # Dependencies.
 dependencies:
diff --git a/src/3rdparty/v8/SConstruct b/src/3rdparty/v8/SConstruct
index 34d0efc..ebce7ff 100644 (file)
@@ -101,14 +101,14 @@ LIBRARY_FLAGS = {
     'os:linux': {
       'CCFLAGS':      ['-ansi'] + GCC_EXTRA_CCFLAGS,
       'library:shared': {
-        'CPPDEFINES': ['V8_SHARED'],
+        'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED'],
         'LIBS': ['pthread']
       }
     },
     'os:macos': {
       'CCFLAGS':      ['-ansi', '-mmacosx-version-min=10.4'],
       'library:shared': {
-        'CPPDEFINES': ['V8_SHARED']
+        'CPPDEFINES': ['V8_SHARED', 'BUILDING_V8_SHARED'],
       }
     },
     'os:freebsd': {
@@ -1601,4 +1601,17 @@ except:
   pass
 
 
+def WarnAboutDeprecation():
+  print """
+#######################################################
+#  WARNING: Building V8 with SCons is deprecated and  #
+#  will not work much longer. Please switch to using  #
+#  the GYP-based build now. Instructions are at       #
+#  http://code.google.com/p/v8/wiki/BuildingWithGYP.  #
+#######################################################
+  """
+
+WarnAboutDeprecation()
+import atexit
+atexit.register(WarnAboutDeprecation)
 Build()
diff --git a/src/3rdparty/v8/build/armu.gypi b/src/3rdparty/v8/build/armu.gypi
deleted file mode 100644 (file)
index d15b8ab..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-#       copyright notice, this list of conditions and the following
-#       disclaimer in the documentation and/or other materials provided
-#       with the distribution.
-#     * Neither the name of Google Inc. nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-{
-  'variables': {
-    'target_arch': 'ia32',
-    'v8_target_arch': 'arm',
-    'armv7': 1,
-    'arm_neon': 0,
-    'arm_fpu': 'vfpv3',
-  },
-}
diff --git a/src/3rdparty/v8/build/common.gypi b/src/3rdparty/v8/build/common.gypi
index 3016d0c..1726d2a 100644 (file)
       ['v8_enable_gdbjit==1', {
         'defines': ['ENABLE_GDB_JIT_INTERFACE',],
       }],
-      ['OS!="mac"', {
-        # TODO(mark): The OS!="mac" conditional is temporary. It can be
-        # removed once the Mac Chromium build stops setting target_arch to
-        # ia32 and instead sets it to mac. Other checks in this file for
-        # OS=="mac" can be removed at that time as well. This can be cleaned
-        # up once http://crbug.com/44205 is fixed.
+      ['v8_target_arch=="arm"', {
+        'defines': [
+          'V8_TARGET_ARCH_ARM',
+        ],
         'conditions': [
-          ['v8_target_arch=="arm"', {
+          [ 'v8_can_use_unaligned_accesses=="true"', {
             'defines': [
-              'V8_TARGET_ARCH_ARM',
+              'CAN_USE_UNALIGNED_ACCESSES=1',
             ],
-            'conditions': [
-              [ 'v8_can_use_unaligned_accesses=="true"', {
-                'defines': [
-                  'CAN_USE_UNALIGNED_ACCESSES=1',
-                ],
-              }],
-              [ 'v8_can_use_unaligned_accesses=="false"', {
-                'defines': [
-                  'CAN_USE_UNALIGNED_ACCESSES=0',
-                ],
-              }],
-              [ 'v8_can_use_vfp_instructions=="true"', {
-                'defines': [
-                  'CAN_USE_VFP_INSTRUCTIONS',
-                ],
-              }],
-              [ 'v8_use_arm_eabi_hardfloat=="true"', {
-                'defines': [
-                  'USE_EABI_HARDFLOAT=1',
-                  'CAN_USE_VFP_INSTRUCTIONS',
-                ],
-                'target_conditions': [
-                  ['_toolset=="target"', {
-                    'cflags': ['-mfloat-abi=hard',],
-                  }],
-                ],
-              }, {
-                'defines': [
-                  'USE_EABI_HARDFLOAT=0',
-                ],
-              }],
-              # The ARM assembler assumes the host is 32 bits,
-              # so force building 32-bit host tools.
-              ['host_arch=="x64" or OS=="android"', {
-                'target_conditions': [
-                  ['_toolset=="host"', {
-                    'cflags': ['-m32'],
-                    'ldflags': ['-m32'],
-                  }],
-                ],
-              }],
+          }],
+          [ 'v8_can_use_unaligned_accesses=="false"', {
+            'defines': [
+              'CAN_USE_UNALIGNED_ACCESSES=0',
             ],
           }],
-          ['v8_target_arch=="ia32"', {
+          [ 'v8_can_use_vfp_instructions=="true"', {
             'defines': [
-              'V8_TARGET_ARCH_IA32',
+              'CAN_USE_VFP_INSTRUCTIONS',
             ],
           }],
-          ['v8_target_arch=="mips"', {
+          [ 'v8_use_arm_eabi_hardfloat=="true"', {
             'defines': [
-              'V8_TARGET_ARCH_MIPS',
+              'USE_EABI_HARDFLOAT=1',
+              'CAN_USE_VFP_INSTRUCTIONS',
             ],
-            'conditions': [
-              [ 'target_arch=="mips"', {
-                'target_conditions': [
-                  ['_toolset=="target"', {
-                    'cflags': ['-EL'],
-                    'ldflags': ['-EL'],
-                    'conditions': [
-                      [ 'v8_use_mips_abi_hardfloat=="true"', {
-                        'cflags': ['-mhard-float'],
-                        'ldflags': ['-mhard-float'],
-                      }, {
-                        'cflags': ['-msoft-float'],
-                        'ldflags': ['-msoft-float'],
-                      }],
-                      ['mips_arch_variant=="mips32r2"', {
-                        'cflags': ['-mips32r2', '-Wa,-mips32r2'],
-                      }],
-                      ['mips_arch_variant=="loongson"', {
-                        'cflags': ['-mips3', '-Wa,-mips3'],
-                      }, {
-                        'cflags': ['-mips32', '-Wa,-mips32'],
-                      }],
-                    ],
-                  }],
-                ],
-              }],
-              [ 'v8_can_use_fpu_instructions=="true"', {
-                'defines': [
-                  'CAN_USE_FPU_INSTRUCTIONS',
-                ],
-              }],
-              [ 'v8_use_mips_abi_hardfloat=="true"', {
-                'defines': [
-                  '__mips_hard_float=1',
-                  'CAN_USE_FPU_INSTRUCTIONS',
-                ],
-              }, {
-                'defines': [
-                  '__mips_soft_float=1'
-                ],
-              }],
-              ['mips_arch_variant=="mips32r2"', {
-                'defines': ['_MIPS_ARCH_MIPS32R2',],
-              }],
-              ['mips_arch_variant=="loongson"', {
-                'defines': ['_MIPS_ARCH_LOONGSON',],
+            'target_conditions': [
+              ['_toolset=="target"', {
+                'cflags': ['-mfloat-abi=hard',],
               }],
-              # The MIPS assembler assumes the host is 32 bits,
-              # so force building 32-bit host tools.
-              ['host_arch=="x64"', {
-                'target_conditions': [
-                  ['_toolset=="host"', {
-                    'cflags': ['-m32'],
-                    'ldflags': ['-m32'],
+            ],
+          }, {
+            'defines': [
+              'USE_EABI_HARDFLOAT=0',
+            ],
+          }],
+        ],
+      }],  # v8_target_arch=="arm"
+      ['v8_target_arch=="ia32"', {
+        'defines': [
+          'V8_TARGET_ARCH_IA32',
+        ],
+      }],  # v8_target_arch=="ia32"
+      ['v8_target_arch=="mips"', {
+        'defines': [
+          'V8_TARGET_ARCH_MIPS',
+        ],
+        'variables': {
+          'mipscompiler': '<!($(echo ${CXX:-$(which g++)}) -v 2>&1 | grep -q "^Target: mips-" && echo "yes" || echo "no")',
+        },
+        'conditions': [
+          ['mipscompiler=="yes"', {
+            'target_conditions': [
+              ['_toolset=="target"', {
+                'cflags': ['-EL'],
+                'ldflags': ['-EL'],
+                'conditions': [
+                  [ 'v8_use_mips_abi_hardfloat=="true"', {
+                    'cflags': ['-mhard-float'],
+                    'ldflags': ['-mhard-float'],
+                  }, {
+                    'cflags': ['-msoft-float'],
+                    'ldflags': ['-msoft-float'],
+                  }],
+                  ['mips_arch_variant=="mips32r2"', {
+                    'cflags': ['-mips32r2', '-Wa,-mips32r2'],
+                  }],
+                  ['mips_arch_variant=="loongson"', {
+                    'cflags': ['-mips3', '-Wa,-mips3'],
+                  }, {
+                    'cflags': ['-mips32', '-Wa,-mips32'],
                   }],
                 ],
               }],
             ],
           }],
-          ['v8_target_arch=="x64"', {
+          [ 'v8_can_use_fpu_instructions=="true"', {
             'defines': [
-              'V8_TARGET_ARCH_X64',
+              'CAN_USE_FPU_INSTRUCTIONS',
             ],
           }],
-        ],
-      }, {  # Section for OS=="mac".
-        'conditions': [
-          ['target_arch=="ia32"', {
-            'xcode_settings': {
-              'ARCHS': ['i386'],
-            }
+          [ 'v8_use_mips_abi_hardfloat=="true"', {
+            'defines': [
+              '__mips_hard_float=1',
+              'CAN_USE_FPU_INSTRUCTIONS',
+            ],
+          }, {
+            'defines': [
+              '__mips_soft_float=1'
+            ],
           }],
-          ['target_arch=="x64"', {
-            'xcode_settings': {
-              'ARCHS': ['x86_64'],
-            }
+          ['mips_arch_variant=="mips32r2"', {
+            'defines': ['_MIPS_ARCH_MIPS32R2',],
+          }],
+          ['mips_arch_variant=="loongson"', {
+            'defines': ['_MIPS_ARCH_LOONGSON',],
           }],
         ],
-      }],
+      }],  # v8_target_arch=="mips"
+      ['v8_target_arch=="x64"', {
+        'defines': [
+          'V8_TARGET_ARCH_X64',
+        ],
+        'xcode_settings': {
+          'ARCHS': [ 'x86_64' ],
+        },
+        'msvs_settings': {
+          'VCLinkerTool': {
+            'StackReserveSize': '2097152',
+          },
+        },
+      }],  # v8_target_arch=="x64"
       ['v8_use_liveobjectlist=="true"', {
         'defines': [
           'ENABLE_DEBUGGER_SUPPORT',
       ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
          or OS=="netbsd"', {
         'conditions': [
-          [ 'target_arch=="ia32"', {
-            'cflags': [ '-m32' ],
-            'ldflags': [ '-m32' ],
-          }],
           [ 'v8_no_strict_aliasing==1', {
             'cflags': [ '-fno-strict-aliasing' ],
           }],
       ['OS=="solaris"', {
         'defines': [ '__C99FEATURES__=1' ],  # isinf() etc.
       }],
+      ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
+         or OS=="netbsd" or OS=="mac" or OS=="android") and \
+        (v8_target_arch=="arm" or v8_target_arch=="ia32" or \
+         v8_target_arch=="mips")', {
+        # Check whether the host compiler and target compiler support the
+        # '-m32' option and set it if so.
+        'target_conditions': [
+          ['_toolset=="host"', {
+            'variables': {
+              'm32flag': '<!((echo | $(echo ${CXX_host:-${CXX:-$(which g++)}}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
+            },
+            'cflags': [ '<(m32flag)' ],
+            'ldflags': [ '<(m32flag)' ],
+            'xcode_settings': {
+              'ARCHS': [ 'i386' ],
+            },
+          }],
+          ['_toolset=="target"', {
+            'variables': {
+              'm32flag': '<!((echo | $(echo ${CXX_target:-${CXX:-$(which g++)}}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
+            },
+            'cflags': [ '<(m32flag)' ],
+            'ldflags': [ '<(m32flag)' ],
+            'xcode_settings': {
+              'ARCHS': [ 'i386' ],
+            },
+          }],
+        ],
+      }],
+      ['OS=="freebsd" or OS=="openbsd"', {
+        'cflags': [ '-I/usr/local/include' ],
+      }],
+      ['OS=="netbsd"', {
+        'cflags': [ '-I/usr/pkg/include' ],
+      }],
     ],  # conditions
     'configurations': {
       'Debug': {
           },
           'VCLinkerTool': {
             'LinkIncremental': '2',
-            # For future reference, the stack size needs to be increased
-            # when building for Windows 64-bit, otherwise some test cases
-            # can cause stack overflow.
-            # 'StackReserveSize': '297152',
           },
         },
         'conditions': [
-          ['OS=="freebsd" or OS=="openbsd"', {
-            'cflags': [ '-I/usr/local/include' ],
-          }],
-          ['OS=="netbsd"', {
-            'cflags': [ '-I/usr/pkg/include' ],
-          }],
           ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
             'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
                         '-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
               }],
             ],
           }],
-          ['OS=="freebsd" or OS=="openbsd"', {
-            'cflags': [ '-I/usr/local/include' ],
-          }],
-          ['OS=="netbsd"', {
-            'cflags': [ '-I/usr/pkg/include' ],
-          }],
           ['OS=="mac"', {
             'xcode_settings': {
               'GCC_OPTIMIZATION_LEVEL': '3',  # -O3
               'VCLinkerTool': {
                 'LinkIncremental': '1',
                 'OptimizeReferences': '2',
-                'OptimizeForWindows98': '1',
                 'EnableCOMDATFolding': '2',
-                # For future reference, the stack size needs to be
-                # increased when building for Windows 64-bit, otherwise
-                # some test cases can cause stack overflow.
-                # 'StackReserveSize': '297152',
               },
             },
           }],  # OS=="win"
diff --git a/src/3rdparty/v8/build/gyp_v8 b/src/3rdparty/v8/build/gyp_v8
index 0fe3403..345f777 100755 (executable)
@@ -38,6 +38,11 @@ import sys
 script_dir = os.path.dirname(__file__)
 v8_root = os.path.normpath(os.path.join(script_dir, os.pardir))
 
+if __name__ == '__main__':
+  os.chdir(v8_root)
+  script_dir = os.path.dirname(__file__)
+  v8_root = '.'
+
 sys.path.insert(0, os.path.join(v8_root, 'tools'))
 import utils
 
@@ -93,7 +98,7 @@ def additional_include_files(args=[]):
       result.append(path)
 
   # Always include standalone.gypi
-  AddInclude(os.path.join(script_dir, 'standalone.gypi'))
+  AddInclude(os.path.join(v8_root, 'build', 'standalone.gypi'))
 
   # Optionally add supplemental .gypi files if present.
   supplements = glob.glob(os.path.join(v8_root, '*', 'supplement.gypi'))
@@ -135,7 +140,10 @@ if __name__ == '__main__':
       # path separators even on Windows due to the use of shlex.split().
       args.extend(shlex.split(gyp_file))
     else:
-      args.append(os.path.join(script_dir, 'all.gyp'))
+      # Note that this must not start with "./" or things break.
+      # So we rely on having done os.chdir(v8_root) above and use the
+      # relative path.
+      args.append(os.path.join('build', 'all.gyp'))
 
   args.extend(['-I' + i for i in additional_include_files(args)])
 
@@ -156,28 +164,6 @@ if __name__ == '__main__':
 
   # Generate for the architectures supported on the given platform.
   gyp_args = list(args)
-  target_arch = None
-  for p in gyp_args:
-    if p.find('-Dtarget_arch=') == 0:
-      target_arch = p
-  if target_arch is None:
-    gyp_args.append('-Dtarget_arch=ia32')
   if utils.GuessOS() == 'linux':
-    gyp_args.append('-S-ia32')
+    gyp_args.append('--generator-output=out')
   run_gyp(gyp_args)
-
-  if utils.GuessOS() == 'linux':
-    gyp_args = list(args)
-    gyp_args.append('-Dtarget_arch=x64')
-    gyp_args.append('-S-x64')
-    run_gyp(gyp_args)
-
-    gyp_args = list(args)
-    gyp_args.append('-I' + v8_root + '/build/armu.gypi')
-    gyp_args.append('-S-armu')
-    run_gyp(gyp_args)
-
-    gyp_args = list(args)
-    gyp_args.append('-I' + v8_root + '/build/mipsu.gypi')
-    gyp_args.append('-S-mipsu')
-    run_gyp(gyp_args)
diff --git a/src/3rdparty/v8/build/mipsu.gypi b/src/3rdparty/v8/build/mipsu.gypi
deleted file mode 100644 (file)
index 637ff84..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-#       copyright notice, this list of conditions and the following
-#       disclaimer in the documentation and/or other materials provided
-#       with the distribution.
-#     * Neither the name of Google Inc. nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-{
-  'variables': {
-    'target_arch': 'ia32',
-    'v8_target_arch': 'mips',
-  },
-}
diff --git a/src/3rdparty/v8/build/standalone.gypi b/src/3rdparty/v8/build/standalone.gypi
index e9b0565..ebdf557 100644 (file)
@@ -37,8 +37,9 @@
       'variables': {
         'variables': {
           'conditions': [
-            ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
-              # This handles the Linux platforms we generally deal with.
+            ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \
+               OS=="netbsd" or OS=="mac"', {
+              # This handles the Unix platforms we generally deal with.
               # Anything else gets passed through, which probably won't work
               # very well; such hosts should pass an explicit target_arch
               # to gyp.
@@ -46,7 +47,8 @@
                 '<!(uname -m | sed -e "s/i.86/ia32/;\
                   s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mips/")',
             }, {
-              # OS!="linux" and OS!="freebsd" and OS!="openbsd" and OS!="netbsd"
+              # OS!="linux" and OS!="freebsd" and OS!="openbsd" and
+              # OS!="netbsd" and OS!="mac"
               'host_arch%': 'ia32',
             }],
           ],
         'want_separate_host_toolset': 0,
       }],
     ],
+    # Default ARM variable settings.
+    'armv7%': 1,
+    'arm_neon%': 0,
+    'arm_fpu%': 'vfpv3',
   },
   'target_defaults': {
     'default_configuration': 'Debug',
       },
     }],  # OS=="win"
     ['OS=="mac"', {
+      'xcode_settings': {
+        'SYMROOT': '<(DEPTH)/xcodebuild',
+      },
       'target_defaults': {
         'xcode_settings': {
           'ALWAYS_SEARCH_USER_PATHS': 'NO',
           'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES',  # -Wnewline-eof
           'MACOSX_DEPLOYMENT_TARGET': '10.4',       # -mmacosx-version-min=10.4
           'PREBINDING': 'NO',                       # No -Wl,-prebind
+          'SYMROOT': '<(DEPTH)/xcodebuild',
           'USE_HEADERMAP': 'NO',
           'OTHER_CFLAGS': [
             '-fno-strict-aliasing',
diff --git a/src/3rdparty/v8/include/v8-debug.h b/src/3rdparty/v8/include/v8-debug.h
old mode 100644 (file)
new mode 100755 (executable)
diff --git a/src/3rdparty/v8/include/v8-profiler.h b/src/3rdparty/v8/include/v8-profiler.h
index e36659f..8f380f2 100644 (file)
@@ -368,16 +368,20 @@ class V8EXPORT HeapSnapshot {
    * with the following structure:
    *
    *  {
-   *    snapshot: {title: "...", uid: nnn},
-   *    nodes: [
-   *      meta-info (JSON string),
-   *      nodes themselves
-   *    ],
-   *    strings: [strings]
+   *    snapshot: {
+   *      title: "...",
+   *      uid: nnn,
+   *      meta: { meta-info },
+   *      node_count: nnn,
+   *      edge_count: nnn
+   *    },
+   *    nodes: [nodes array],
+   *    edges: [edges array],
+   *    strings: [strings array]
    *  }
    *
-   * Outgoing node links are stored after each node. Nodes reference strings
-   * and other nodes by their indexes in corresponding arrays.
+   * Nodes reference strings, other nodes, and edges by their indexes
+   * in corresponding arrays.
    */
   void Serialize(OutputStream* stream, SerializationFormat format) const;
 };
@@ -409,6 +413,19 @@ class V8EXPORT HeapProfiler {
   static const HeapSnapshot* FindSnapshot(unsigned uid);
 
   /**
+   * Returns SnapshotObjectId for a heap object referenced by |value| if
+   * it has been seen by the heap profiler, kUnknownObjectId otherwise.
+   */
+  static SnapshotObjectId GetSnapshotObjectId(Handle<Value> value);
+
+  /**
+   * A constant for invalid SnapshotObjectId. GetSnapshotObjectId will return
+   * it in case heap profiler cannot find id  for the object passed as
+   * parameter. HeapSnapshot::GetNodeById will always return NULL for such id.
+   */
+  static const SnapshotObjectId kUnknownObjectId = 0;
+
+  /**
    * Takes a heap snapshot and returns it. Title may be an empty string.
    * See HeapSnapshot::Type for types description.
    */
@@ -418,6 +435,33 @@ class V8EXPORT HeapProfiler {
       ActivityControl* control = NULL);
 
   /**
+   * Starts tracking of heap objects population statistics. After calling
+   * this method, all heap objects relocations done by the garbage collector
+   * are being registered.
+   */
+  static void StartHeapObjectsTracking();
+
+  /**
+   * Adds a new time interval entry to the aggregated statistics array. The
+   * time interval entry contains information on the current heap objects
+   * population size. The method also updates aggregated statistics and
+   * reports updates for all previous time intervals via the OutputStream
+   * object. Updates on each time interval are provided as a stream of the
+   * HeapStatsUpdate structure instances.
+   *
+   * StartHeapObjectsTracking must be called before the first call to this
+   * method.
+   */
+  static void PushHeapObjectsStats(OutputStream* stream);
+
+  /**
+   * Stops tracking of heap objects population statistics, cleans up all
+   * collected data. StartHeapObjectsTracking must be called again prior to
+   * calling PushHeapObjectsStats next time.
+   */
+  static void StopHeapObjectsTracking();
+
+  /**
    * Deletes all snapshots taken. All previously returned pointers to
    * snapshots and their contents become invalid after this call.
    */
@@ -514,6 +558,19 @@ class V8EXPORT RetainedObjectInfo {  // NOLINT
 };
 
 
+/**
+ * A struct for exporting HeapStats data from V8, using "push" model.
+ * See HeapProfiler::PushHeapObjectsStats.
+ */
+struct HeapStatsUpdate {
+  HeapStatsUpdate(uint32_t index, uint32_t count, uint32_t size)
+    : index(index), count(count), size(size) { }
+  uint32_t index;  // Index of the time interval that was changed.
+  uint32_t count;  // New value of count field for the interval with this index.
+  uint32_t size;  // New value of size field for the interval with this index.
+};
+
+
 }  // namespace v8
 
 
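Taken together, the profiler additions above form a push-style sampling
API. A minimal consumer sketch, assuming the era's OutputStream interface
in v8-profiler.h (a WriteHeapStatsChunk hook alongside the existing
WriteAsciiChunk; the class and function names below are illustrative):

    #include <cstdio>
    #include <v8.h>
    #include <v8-profiler.h>

    // Receives one HeapStatsUpdate per changed time interval: the
    // interval's index plus its new object count and total size.
    class StatsLogger : public v8::OutputStream {
     public:
      virtual void EndOfStream() {}
      virtual WriteResult WriteAsciiChunk(char* data, int size) {
        return kContinue;  // unused here; would collect snapshot JSON
      }
      virtual WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* data,
                                              int count) {
        for (int i = 0; i < count; ++i)
          std::printf("interval %u: count=%u size=%u\n",
                      data[i].index, data[i].count, data[i].size);
        return kContinue;
      }
    };

    void SampleHeapGrowth() {
      v8::HeapProfiler::StartHeapObjectsTracking();
      // ... run scripts, allocate objects ...
      StatsLogger logger;
      v8::HeapProfiler::PushHeapObjectsStats(&logger);  // emit the deltas
      v8::HeapProfiler::StopHeapObjectsTracking();
    }
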
diff --git a/src/3rdparty/v8/include/v8.h b/src/3rdparty/v8/include/v8.h
index 0d371c9..d31ef54 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 
 #else  // _WIN32
 
-// Setup for Linux shared library export. There is no need to distinguish
-// between building or using the V8 shared library, but we should not
-// export symbols when we are building a static library.
+// Setup for Linux shared library export.
 #if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
+#ifdef BUILDING_V8_SHARED
 #define V8EXPORT __attribute__ ((visibility("default")))
+#else
+#define V8EXPORT
+#endif
 #else  // defined(__GNUC__) && (__GNUC__ >= 4)
 #define V8EXPORT
 #endif  // defined(__GNUC__) && (__GNUC__ >= 4)
@@ -107,6 +109,7 @@ class Data;
 class AccessorInfo;
 class StackTrace;
 class StackFrame;
+class Isolate;
 
 namespace internal {
 
@@ -587,12 +590,6 @@ class ScriptOrigin {
  */
 class V8EXPORT Script {
  public:
-  enum CompileFlags {
-      Default    = 0x00,
-      QmlMode    = 0x01,
-      NativeMode = 0x02
-  };
-
   /**
    * Compiles the specified script (context-independent).
    *
@@ -611,8 +608,7 @@ class V8EXPORT Script {
   static Local<Script> New(Handle<String> source,
                            ScriptOrigin* origin = NULL,
                            ScriptData* pre_data = NULL,
-                           Handle<String> script_data = Handle<String>(),
-                           CompileFlags = Default);
+                           Handle<String> script_data = Handle<String>());
 
   /**
    * Compiles the specified script using the specified file name
@@ -625,8 +621,7 @@ class V8EXPORT Script {
    *   will use the currently entered context).
    */
   static Local<Script> New(Handle<String> source,
-                           Handle<Value> file_name,
-                           CompileFlags = Default);
+                           Handle<Value> file_name);
 
   /**
    * Compiles the specified script (bound to current context).
@@ -647,8 +642,7 @@ class V8EXPORT Script {
   static Local<Script> Compile(Handle<String> source,
                                ScriptOrigin* origin = NULL,
                                ScriptData* pre_data = NULL,
-                               Handle<String> script_data = Handle<String>(),
-                               CompileFlags = Default);
+                               Handle<String> script_data = Handle<String>());
 
   /**
    * Compiles the specified script using the specified file name
@@ -665,8 +659,7 @@ class V8EXPORT Script {
    */
   static Local<Script> Compile(Handle<String> source,
                                Handle<Value> file_name,
-                               Handle<String> script_data = Handle<String>(),
-                               CompileFlags = Default);
+                               Handle<String> script_data = Handle<String>());
 
   /**
    * Runs the script returning the resulting value.  If the script is
@@ -676,7 +669,6 @@ class V8EXPORT Script {
    * compiled.
    */
   Local<Value> Run();
-  Local<Value> Run(Handle<Object> qml);
 
   /**
    * Returns the script id value.
@@ -873,13 +865,13 @@ class Value : public Data {
    * Returns true if this value is the undefined value.  See ECMA-262
    * 4.3.10.
    */
-  V8EXPORT bool IsUndefined() const;
+  inline bool IsUndefined() const;
 
   /**
    * Returns true if this value is the null value.  See ECMA-262
    * 4.3.11.
    */
-  V8EXPORT bool IsNull() const;
+  inline bool IsNull() const;
 
    /**
    * Returns true if this value is true.
@@ -967,11 +959,6 @@ class Value : public Data {
    */
   V8EXPORT bool IsRegExp() const;
 
-  /**
-   * Returns true if this value is an Error.
-   */
-  V8EXPORT bool IsError() const;
-
   V8EXPORT Local<Boolean> ToBoolean() const;
   V8EXPORT Local<Number> ToNumber() const;
   V8EXPORT Local<String> ToString() const;
@@ -998,7 +985,11 @@ class Value : public Data {
   V8EXPORT bool StrictEquals(Handle<Value> that) const;
 
  private:
+  inline bool QuickIsUndefined() const;
+  inline bool QuickIsNull() const;
   inline bool QuickIsString() const;
+  V8EXPORT bool FullIsUndefined() const;
+  V8EXPORT bool FullIsNull() const;
   V8EXPORT bool FullIsString() const;
 };
 
@@ -1045,51 +1036,6 @@ class String : public Primitive {
   V8EXPORT bool MayContainNonAscii() const;
 
   /**
-   * Returns the hash of this string.
-   */
-  V8EXPORT uint32_t Hash() const;
-
-  struct CompleteHashData {
-    CompleteHashData() : length(0), hash(0), symbol_id(0) {}
-    int length;
-    uint32_t hash;
-    uint32_t symbol_id;
-  };
-
-  /**
-   * Returns the "complete" hash of the string.  This is
-   * all the information about the string needed to implement
-   * a very efficient hash keyed on the string.
-   *
-   * The members of CompleteHashData are:
-   *    length: The length of the string.  Equivalent to Length()
-   *    hash: The hash of the string.  Equivalent to Hash()
-   *    symbol_id: If the string is a sequential symbol, the symbol
-   *        id, otherwise 0.  If the symbol ids of two strings are
-   *        the same (and non-zero) the two strings are identical.
-   *        If the symbol ids are different the strings may still be
-   *        identical, but an Equals() check must be performed.
-   */
-  V8EXPORT CompleteHashData CompleteHash() const;
-
-  /**
-   * Compute a hash value for the passed UTF16 string
-   * data.
-   */
-  V8EXPORT static uint32_t ComputeHash(uint16_t *string, int length);
-  V8EXPORT static uint32_t ComputeHash(char *string, int length);
-
-  /**
-   * Returns true if this string is equal to the external
-   * string data provided.
-   */
-  V8EXPORT bool Equals(uint16_t *string, int length);
-  V8EXPORT bool Equals(char *string, int length);
-  inline bool Equals(Handle<Value> that) const {
-    return v8::Value::Equals(that);
-  }
-
-  /**
    * Write the contents of the string to an external buffer.
    * If no arguments are given, expects the buffer to be large
    * enough to hold the entire string and NULL terminator. Copies
@@ -1120,8 +1066,6 @@ class String : public Primitive {
     NO_NULL_TERMINATION = 2
   };
 
-  V8EXPORT uint16_t GetCharacter(int index);
-
   // 16-bit character codes.
   V8EXPORT int Write(uint16_t* buffer,
                      int start = 0,
@@ -1142,6 +1086,7 @@ class String : public Primitive {
    * A zero length string.
    */
   V8EXPORT static v8::Local<v8::String> Empty();
+  inline static v8::Local<v8::String> Empty(Isolate* isolate);
 
   /**
    * Returns true if the string is external
@@ -1299,8 +1244,7 @@ class String : public Primitive {
    * this function should not otherwise delete or modify the resource. Neither
    * should the underlying buffer be deallocated or modified except through the
    * destructor of the external string resource.
-   */
-  V8EXPORT static Local<String> NewExternal(
+   */ V8EXPORT static Local<String> NewExternal(
       ExternalAsciiStringResource* resource);
 
   /**
@@ -1623,25 +1567,6 @@ class Object : public Value {
   /** Sets a native pointer in an internal field. */
   V8EXPORT void SetPointerInInternalField(int index, void* value);
 
-  class V8EXPORT ExternalResource { // NOLINT
-   public:
-    ExternalResource() {}
-    virtual ~ExternalResource() {}
-
-   protected:
-    virtual void Dispose() { delete this; }
-
-   private:
-    // Disallow copying and assigning.
-    ExternalResource(const ExternalResource&);
-    void operator=(const ExternalResource&);
-
-    friend class v8::internal::Heap;
-  };
-
-  V8EXPORT void SetExternalResource(ExternalResource *);
-  V8EXPORT ExternalResource *GetExternalResource();
-
   // Testers for local properties.
   V8EXPORT bool HasOwnProperty(Handle<String> key);
   V8EXPORT bool HasRealNamedProperty(Handle<String> key);
@@ -2050,10 +1975,13 @@ class Arguments {
   inline Local<Object> Holder() const;
   inline bool IsConstructCall() const;
   inline Local<Value> Data() const;
+  inline Isolate* GetIsolate() const;
+
  private:
-  static const int kDataIndex = 0;
-  static const int kCalleeIndex = -1;
-  static const int kHolderIndex = -2;
+  static const int kIsolateIndex = 0;
+  static const int kDataIndex = -1;
+  static const int kCalleeIndex = -2;
+  static const int kHolderIndex = -3;
 
   friend class ImplementationUtilities;
   inline Arguments(internal::Object** implicit_args,
@@ -2075,9 +2003,11 @@ class V8EXPORT AccessorInfo {
  public:
   inline AccessorInfo(internal::Object** args)
       : args_(args) { }
+  inline Isolate* GetIsolate() const;
   inline Local<Value> Data() const;
   inline Local<Object> This() const;
   inline Local<Object> Holder() const;
+
  private:
   internal::Object** args_;
 };
@@ -2365,7 +2295,6 @@ class V8EXPORT FunctionTemplate : public Template {
                                        NamedPropertyQuery query,
                                        NamedPropertyDeleter remover,
                                        NamedPropertyEnumerator enumerator,
-                                       bool is_fallback,
                                        Handle<Value> data);
   void SetIndexedInstancePropertyHandler(IndexedPropertyGetter getter,
                                          IndexedPropertySetter setter,
@@ -2449,12 +2378,6 @@ class V8EXPORT ObjectTemplate : public Template {
                                NamedPropertyDeleter deleter = 0,
                                NamedPropertyEnumerator enumerator = 0,
                                Handle<Value> data = Handle<Value>());
-  void SetFallbackPropertyHandler(NamedPropertyGetter getter,
-                                  NamedPropertySetter setter = 0,
-                                  NamedPropertyQuery query = 0,
-                                  NamedPropertyDeleter deleter = 0,
-                                  NamedPropertyEnumerator enumerator = 0,
-                                  Handle<Value> data = Handle<Value>());
 
   /**
    * Sets an indexed property handler on the object template.
@@ -2526,18 +2449,6 @@ class V8EXPORT ObjectTemplate : public Template {
    */
   void SetInternalFieldCount(int value);
 
-  /**
-   * Sets whether the object can store an "external resource" object.
-   */
-  bool HasExternalResource();
-  void SetHasExternalResource(bool value);
-
-  /**
-   * Mark object instances of the template as using the user object 
-   * comparison callback.
-   */
-  void MarkAsUseUserObjectComparison();
-
  private:
   ObjectTemplate();
   static Local<ObjectTemplate> New(Handle<FunctionTemplate> constructor);
@@ -2604,7 +2515,7 @@ class V8EXPORT Extension {  // NOLINT
             int source_length = -1);
   virtual ~Extension() { }
   virtual v8::Handle<v8::FunctionTemplate>
-      GetNativeFunction(v8::Handle<v8::String>) {
+      GetNativeFunction(v8::Handle<v8::String> name) {
     return v8::Handle<v8::FunctionTemplate>();
   }
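
The change above only names the formal parameter, but this is the hook embedders override to back natives declared in an extension's source. A minimal sketch, assuming a hypothetical trace() extension (the names are illustrative, not part of this patch):

    #include <v8.h>

    // Hypothetical extension declaring a single native function.
    class TraceExtension : public v8::Extension {
     public:
      TraceExtension()
          : v8::Extension("v8/trace", "native function trace();") {}
      virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
          v8::Handle<v8::String> name) {
        // Only trace() is declared, so |name| can only be "trace" here.
        return v8::FunctionTemplate::New(Trace);
      }
      static v8::Handle<v8::Value> Trace(const v8::Arguments& args) {
        return v8::Undefined(args.GetIsolate());
      }
    };
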
 
@@ -2653,6 +2564,11 @@ Handle<Primitive> V8EXPORT Null();
 Handle<Boolean> V8EXPORT True();
 Handle<Boolean> V8EXPORT False();
 
+inline Handle<Primitive> Undefined(Isolate* isolate);
+inline Handle<Primitive> Null(Isolate* isolate);
+inline Handle<Boolean> True(Isolate* isolate);
+inline Handle<Boolean> False(Isolate* isolate);
+
 
 /**
  * A set of constraints that specifies the limits of the runtime's memory use.
@@ -2760,10 +2676,6 @@ typedef void (*FailedAccessCheckCallback)(Local<Object> target,
                                           AccessType type,
                                           Local<Value> data);
 
-// --- User Object Comparison Callback ---
-typedef bool (*UserObjectComparisonCallback)(Local<Object> lhs, 
-                                             Local<Object> rhs);
-
 // --- AllowCodeGenerationFromStrings callbacks ---
 
 /**
@@ -2907,13 +2819,13 @@ class V8EXPORT Isolate {
   /**
    * Associate embedder-specific data with the isolate.
    */
-  void SetData(void* data);
+  inline void SetData(void* data);
 
   /**
-   * Retrive embedder-specific data from the isolate.
+   * Retrieve embedder-specific data from the isolate.
    * Returns NULL if SetData has never been called.
    */
-  void* GetData();
+  inline void* GetData();
 
  private:
   Isolate();
@@ -2990,7 +2902,7 @@ typedef uintptr_t (*ReturnAddressLocationResolver)(
 class V8EXPORT ExternalResourceVisitor {  // NOLINT
  public:
   virtual ~ExternalResourceVisitor() {}
-  virtual void VisitExternalString(Handle<String>) {}
+  virtual void VisitExternalString(Handle<String> string) {}
 };
 
 
@@ -3115,9 +3027,6 @@ class V8EXPORT V8 {
   /** Callback function for reporting failed access checks.*/
   static void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback);
 
-  /** Callback for user object comparisons */
-  static void SetUserObjectComparisonCallbackFunction(UserObjectComparisonCallback);
-
   /**
    * Enables the host application to receive a notification before a
    * garbage collection.  Allocations are not allowed in the
@@ -3261,7 +3170,8 @@ class V8EXPORT V8 {
    *   that is kept alive by JavaScript objects.
    * \returns the adjusted value.
    */
-  static int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
+  static intptr_t AdjustAmountOfExternalAllocatedMemory(
+      intptr_t change_in_bytes);
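
Widening the parameter to intptr_t lets 64-bit embedders report adjustments beyond the old 2 GB int limit. The usual pattern pairs one positive adjustment at allocation with a matching negative one at teardown; a sketch, with an illustrative wrapper class:

    #include <v8.h>

    class PixelBuffer {  // illustrative JS-reachable buffer wrapper
     public:
      explicit PixelBuffer(intptr_t size)
          : size_(size), data_(new char[size]) {
        // Tell the GC this object keeps |size_| bytes of C++ heap alive.
        v8::V8::AdjustAmountOfExternalAllocatedMemory(size_);
      }
      ~PixelBuffer() {
        delete[] data_;
        v8::V8::AdjustAmountOfExternalAllocatedMemory(-size_);
      }
     private:
      intptr_t size_;
      char* data_;
    };
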
 
   /**
    * Suspends recording of tick samples in the profiler.
@@ -3619,8 +3529,6 @@ class V8EXPORT Context {
    * JavaScript frames an empty handle is returned.
    */
   static Local<Context> GetCalling();
-  static Local<Object> GetCallingQmlGlobal();
-  static Local<Value> GetCallingScriptData();
 
   /**
    * Sets the security token for the context.  To access an object in
@@ -3846,6 +3754,12 @@ class V8EXPORT Locker {
 
 
 /**
+ * A struct for exporting HeapStats data from V8, using "push" model.
+ */
+struct HeapStatsUpdate;
+
+
+/**
  * An interface for exporting data from V8, using "push" model.
  */
 class V8EXPORT OutputStream {  // NOLINT
@@ -3870,6 +3784,14 @@ class V8EXPORT OutputStream {  // NOLINT
    * will not be called in case writing was aborted.
    */
   virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
+  /**
+   * Writes the next chunk of heap stats data into the stream. Writing
+   * can be stopped by returning kAbort as function result. EndOfStream
+   * will not be called in case writing was aborted.
+   */
+  virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
+    return kAbort;
+  }
 };
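
Since the new hook ships with a kAbort default body, existing OutputStream subclasses keep compiling unchanged; only consumers of the heap-stats push model override it. A minimal file-backed sketch (the stdio plumbing is illustrative; HeapStatsUpdate's members are defined in v8-profiler.h):

    #include <cstdio>
    #include <v8.h>
    #include <v8-profiler.h>  // for the HeapStatsUpdate definition

    class FileStream : public v8::OutputStream {
     public:
      explicit FileStream(FILE* f) : f_(f) {}
      virtual void EndOfStream() { fflush(f_); }
      virtual WriteResult WriteAsciiChunk(char* data, int size) {
        size_t n = static_cast<size_t>(size);
        return fwrite(data, 1, n, f_) == n ? kContinue : kAbort;
      }
      virtual WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* data,
                                              int count) {
        size_t n = static_cast<size_t>(count);
        return fwrite(data, sizeof(*data), n, f_) == n ? kContinue : kAbort;
      }
     private:
      FILE* f_;
    };
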
 
 
@@ -3958,18 +3880,6 @@ const uintptr_t kEncodablePointerMask =
     PlatformSmiTagging::kEncodablePointerMask;
 const int kPointerToSmiShift = PlatformSmiTagging::kPointerToSmiShift;
 
-template <size_t ptr_size> struct InternalConstants;
-
-// Internal constants for 32-bit systems.
-template <> struct InternalConstants<4> {
-  static const int kStringResourceOffset = 3 * kApiPointerSize;
-};
-
-// Internal constants for 64-bit systems.
-template <> struct InternalConstants<8> {
-  static const int kStringResourceOffset = 3 * kApiPointerSize;
-};
-
 /**
  * This class exports constants and functionality from within v8 that
  * is necessary to implement inline functions in the v8 api.  Don't
@@ -3981,18 +3891,31 @@ class Internals {
   // the implementation of v8.
   static const int kHeapObjectMapOffset = 0;
   static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize;
-  static const int kStringResourceOffset =
-      InternalConstants<kApiPointerSize>::kStringResourceOffset;
+  static const int kStringResourceOffset = 3 * kApiPointerSize;
 
+  static const int kOddballKindOffset = 3 * kApiPointerSize;
   static const int kForeignAddressOffset = kApiPointerSize;
   static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
   static const int kFullStringRepresentationMask = 0x07;
   static const int kExternalTwoByteRepresentationTag = 0x02;
 
+  static const int kIsolateStateOffset = 0;
+  static const int kIsolateEmbedderDataOffset = 1 * kApiPointerSize;
+  static const int kIsolateRootsOffset = 3 * kApiPointerSize;
+  static const int kUndefinedValueRootIndex = 5;
+  static const int kNullValueRootIndex = 7;
+  static const int kTrueValueRootIndex = 8;
+  static const int kFalseValueRootIndex = 9;
+  static const int kEmptySymbolRootIndex = 128;
+
   static const int kJSObjectType = 0xaa;
   static const int kFirstNonstringType = 0x80;
+  static const int kOddballType = 0x82;
   static const int kForeignType = 0x85;
 
+  static const int kUndefinedOddballKind = 5;
+  static const int kNullOddballKind = 3;
+
   static inline bool HasHeapObjectTag(internal::Object* value) {
     return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
             kHeapObjectTag);
@@ -4012,6 +3935,11 @@ class Internals {
     return ReadField<uint8_t>(map, kMapInstanceTypeOffset);
   }
 
+  static inline int GetOddballKind(internal::Object* obj) {
+    typedef internal::Object O;
+    return SmiValue(ReadField<O*>(obj, kOddballKindOffset));
+  }
+
   static inline void* GetExternalPointerFromSmi(internal::Object* value) {
     const uintptr_t address = reinterpret_cast<uintptr_t>(value);
     return reinterpret_cast<void*>(address >> kPointerToSmiShift);
@@ -4032,19 +3960,41 @@ class Internals {
     return representation == kExternalTwoByteRepresentationTag;
   }
 
+  static inline bool IsInitialized(v8::Isolate* isolate) {
+    uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateStateOffset;
+    return *reinterpret_cast<int*>(addr) == 1;
+  }
+
+  static inline void SetEmbedderData(v8::Isolate* isolate, void* data) {
+    uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
+        kIsolateEmbedderDataOffset;
+    *reinterpret_cast<void**>(addr) = data;
+  }
+
+  static inline void* GetEmbedderData(v8::Isolate* isolate) {
+    uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
+        kIsolateEmbedderDataOffset;
+    return *reinterpret_cast<void**>(addr);
+  }
+
+  static inline internal::Object** GetRoot(v8::Isolate* isolate, int index) {
+    uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateRootsOffset;
+    return reinterpret_cast<internal::Object**>(addr + index * kApiPointerSize);
+  }
+
   template <typename T>
   static inline T ReadField(Object* ptr, int offset) {
     uint8_t* addr = reinterpret_cast<uint8_t*>(ptr) + offset - kHeapObjectTag;
     return *reinterpret_cast<T*>(addr);
   }
 
-  static inline bool CanCastToHeapObject(void*) { return false; }
-  static inline bool CanCastToHeapObject(Context*) { return true; }
-  static inline bool CanCastToHeapObject(String*) { return true; }
-  static inline bool CanCastToHeapObject(Object*) { return true; }
-  static inline bool CanCastToHeapObject(Message*) { return true; }
-  static inline bool CanCastToHeapObject(StackTrace*) { return true; }
-  static inline bool CanCastToHeapObject(StackFrame*) { return true; }
+  static inline bool CanCastToHeapObject(void* o) { return false; }
+  static inline bool CanCastToHeapObject(Context* o) { return true; }
+  static inline bool CanCastToHeapObject(String* o) { return true; }
+  static inline bool CanCastToHeapObject(Object* o) { return true; }
+  static inline bool CanCastToHeapObject(Message* o) { return true; }
+  static inline bool CanCastToHeapObject(StackTrace* o) { return true; }
+  static inline bool CanCastToHeapObject(StackFrame* o) { return true; }
 };
 
 }  // namespace internal
@@ -4158,6 +4108,11 @@ Local<Value> Arguments::Data() const {
 }
 
 
+Isolate* Arguments::GetIsolate() const {
+  return *reinterpret_cast<Isolate**>(&implicit_args_[kIsolateIndex]);
+}
+
+
 bool Arguments::IsConstructCall() const {
   return is_construct_call_;
 }
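
Invocation callbacks can now pull the isolate straight out of the implicit-args array (a single indexed load) instead of going through the Isolate::Current() thread-local lookup. A sketch with a hypothetical callback:

    // Hypothetical native, e.g. installed via FunctionTemplate::New(Answer).
    static v8::Handle<v8::Value> Answer(const v8::Arguments& args) {
      v8::Isolate* isolate = args.GetIsolate();  // no TLS access
      if (args.Length() != 0) return v8::Undefined(isolate);
      return v8::Integer::New(42);
    }
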
@@ -4270,6 +4225,15 @@ String* String::Cast(v8::Value* value) {
 }
 
 
+Local<String> String::Empty(Isolate* isolate) {
+  typedef internal::Object* S;
+  typedef internal::Internals I;
+  if (!I::IsInitialized(isolate)) return Empty();
+  S* slot = I::GetRoot(isolate, I::kEmptySymbolRootIndex);
+  return Local<String>(reinterpret_cast<String*>(slot));
+}
+
+
 String::ExternalStringResource* String::GetExternalStringResource() const {
   typedef internal::Object O;
   typedef internal::Internals I;
@@ -4288,6 +4252,42 @@ String::ExternalStringResource* String::GetExternalStringResource() const {
 }
 
 
+bool Value::IsUndefined() const {
+#ifdef V8_ENABLE_CHECKS
+  return FullIsUndefined();
+#else
+  return QuickIsUndefined();
+#endif
+}
+
+bool Value::QuickIsUndefined() const {
+  typedef internal::Object O;
+  typedef internal::Internals I;
+  O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
+  if (!I::HasHeapObjectTag(obj)) return false;
+  if (I::GetInstanceType(obj) != I::kOddballType) return false;
+  return (I::GetOddballKind(obj) == I::kUndefinedOddballKind);
+}
+
+
+bool Value::IsNull() const {
+#ifdef V8_ENABLE_CHECKS
+  return FullIsNull();
+#else
+  return QuickIsNull();
+#endif
+}
+
+bool Value::QuickIsNull() const {
+  typedef internal::Object O;
+  typedef internal::Internals I;
+  O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
+  if (!I::HasHeapObjectTag(obj)) return false;
+  if (I::GetInstanceType(obj) != I::kOddballType) return false;
+  return (I::GetOddballKind(obj) == I::kNullOddballKind);
+}
+
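
With V8_ENABLE_CHECKS off, these common tests now compile down to in-header tag and oddball-kind loads and never cross the library boundary; the exported Full variants keep the slow path and assert agreement in checked builds. A hypothetical hot-loop sketch of what stays inline:

    void CountNullish(v8::Handle<v8::Value> v, int* undefs, int* nulls) {
      if (v->IsUndefined()) ++*undefs;      // inline QuickIsUndefined()
      else if (v->IsNull()) ++*nulls;       // inline QuickIsNull()
    }
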
+
 bool Value::IsString() const {
 #ifdef V8_ENABLE_CHECKS
   return FullIsString();
@@ -4393,6 +4393,11 @@ External* External::Cast(v8::Value* value) {
 }
 
 
+Isolate* AccessorInfo::GetIsolate() const {
+  return *reinterpret_cast<Isolate**>(&args_[-3]);
+}
+
+
 Local<Value> AccessorInfo::Data() const {
   return Local<Value>(reinterpret_cast<Value*>(&args_[-2]));
 }
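
The isolate is stored at args_[-3], one slot beyond Data(), mirroring kIsolateIndex in the Arguments layout above; accessor callbacks reach it the same way invocation callbacks do. A hypothetical getter sketch:

    // Hypothetical accessor, wired up via ObjectTemplate::SetAccessor().
    static v8::Handle<v8::Value> GetVersion(v8::Local<v8::String> property,
                                            const v8::AccessorInfo& info) {
      v8::Isolate* isolate = info.GetIsolate();
      if (isolate->GetData() == NULL) return v8::Null(isolate);
      return v8::String::New("3.11.4");
    }
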
@@ -4408,6 +4413,54 @@ Local<Object> AccessorInfo::Holder() const {
 }
 
 
+Handle<Primitive> Undefined(Isolate* isolate) {
+  typedef internal::Object* S;
+  typedef internal::Internals I;
+  if (!I::IsInitialized(isolate)) return Undefined();
+  S* slot = I::GetRoot(isolate, I::kUndefinedValueRootIndex);
+  return Handle<Primitive>(reinterpret_cast<Primitive*>(slot));
+}
+
+
+Handle<Primitive> Null(Isolate* isolate) {
+  typedef internal::Object* S;
+  typedef internal::Internals I;
+  if (!I::IsInitialized(isolate)) return Null();
+  S* slot = I::GetRoot(isolate, I::kNullValueRootIndex);
+  return Handle<Primitive>(reinterpret_cast<Primitive*>(slot));
+}
+
+
+Handle<Boolean> True(Isolate* isolate) {
+  typedef internal::Object* S;
+  typedef internal::Internals I;
+  if (!I::IsInitialized(isolate)) return True();
+  S* slot = I::GetRoot(isolate, I::kTrueValueRootIndex);
+  return Handle<Boolean>(reinterpret_cast<Boolean*>(slot));
+}
+
+
+Handle<Boolean> False(Isolate* isolate) {
+  typedef internal::Object* S;
+  typedef internal::Internals I;
+  if (!I::IsInitialized(isolate)) return False();
+  S* slot = I::GetRoot(isolate, I::kFalseValueRootIndex);
+  return Handle<Boolean>(reinterpret_cast<Boolean*>(slot));
+}
+
+
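
Each of these overloads is a direct read from the isolate's root array via Internals::GetRoot(), falling back to the TLS-based versions only for uninitialized isolates. A small hedged helper showing typical use:

    // Hypothetical helper: map an optional bool onto JS values.
    v8::Handle<v8::Value> ToJS(v8::Isolate* isolate, const bool* maybe) {
      if (maybe == NULL) return v8::Null(isolate);
      return *maybe ? v8::True(isolate) : v8::False(isolate);
    }
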
+void Isolate::SetData(void* data) {
+  typedef internal::Internals I;
+  I::SetEmbedderData(this, data);
+}
+
+
+void* Isolate::GetData() {
+  typedef internal::Internals I;
+  return I::GetEmbedderData(this);
+}
+
+
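
SetData()/GetData() now inline to a raw field access at kIsolateEmbedderDataOffset, so per-isolate embedder state costs one pointer read. A sketch assuming a hypothetical state struct:

    struct EmbedderState {  // hypothetical per-isolate bookkeeping
      int scripts_run;
    };

    void AttachState(v8::Isolate* isolate) {
      isolate->SetData(new EmbedderState());
    }

    EmbedderState* StateOf(v8::Isolate* isolate) {
      // NULL until AttachState() has been called for this isolate.
      return static_cast<EmbedderState*>(isolate->GetData());
    }
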
 /**
  * \example shell.cc
  * A simple shell that takes a list of expressions on the
index 1606a8f..7a84a2a 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -434,9 +434,9 @@ v8::Handle<v8::String> ReadLine() {
   }
   if (res == NULL) {
     v8::Handle<v8::Primitive> t = v8::Undefined();
-    return reinterpret_cast<v8::Handle<v8::String>&>(t);
+    return v8::Handle<v8::String>(v8::String::Cast(*t));
   }
-  // remove newline char
+  // Remove newline char
   for (char* pos = buffer; *pos != '\0'; pos++) {
     if (*pos == '\n') {
       *pos = '\0';
index 55b2a98..3c720a7 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
       'sources': [
         'process.cc',
       ],
+    },
+    {
+      'target_name': 'lineprocessor',
+      'sources': [
+        'lineprocessor.cc',
+      ],
     }
   ],
 }
index 78fc0b1..52a84ed 100644 (file)
@@ -1152,7 +1152,6 @@ void FunctionTemplate::SetNamedInstancePropertyHandler(
       NamedPropertyQuery query,
       NamedPropertyDeleter remover,
       NamedPropertyEnumerator enumerator,
-      bool is_fallback,
       Handle<Value> data) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   if (IsDeadCheck(isolate,
@@ -1171,7 +1170,6 @@ void FunctionTemplate::SetNamedInstancePropertyHandler(
   if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
   if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
   if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
-  obj->set_is_fallback(i::Smi::FromInt(is_fallback));
 
   if (data.IsEmpty()) data = v8::Undefined();
   obj->set_data(*Utils::OpenHandle(*data));
@@ -1316,33 +1314,6 @@ void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter,
                                                         query,
                                                         remover,
                                                         enumerator,
-                                                        false,
-                                                        data);
-}
-
-
-void ObjectTemplate::SetFallbackPropertyHandler(NamedPropertyGetter getter,
-                                                NamedPropertySetter setter,
-                                                NamedPropertyQuery query,
-                                                NamedPropertyDeleter remover,
-                                                NamedPropertyEnumerator enumerator,
-                                                Handle<Value> data) {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetNamedPropertyHandler()")) {
-    return;
-  }
-  ENTER_V8(isolate);
-  i::HandleScope scope(isolate);
-  EnsureConstructor(this);
-  i::FunctionTemplateInfo* constructor =
-      i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
-  i::Handle<i::FunctionTemplateInfo> cons(constructor);
-  Utils::ToLocal(cons)->SetNamedInstancePropertyHandler(getter,
-                                                        setter,
-                                                        query,
-                                                        remover,
-                                                        enumerator,
-                                                        true,
                                                         data);
 }
 
@@ -1465,45 +1436,6 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
 }
 
 
-bool ObjectTemplate::HasExternalResource()
-{
-  if (IsDeadCheck(Utils::OpenHandle(this)->GetIsolate(),
-                  "v8::ObjectTemplate::HasExternalResource()")) {
-    return 0;
-  }
-  return !Utils::OpenHandle(this)->has_external_resource()->IsUndefined();
-}
-
-
-void ObjectTemplate::SetHasExternalResource(bool value)
-{
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetHasExternalResource()")) {
-    return;
-  }
-  ENTER_V8(isolate);
-  if (value) {
-    EnsureConstructor(this);
-  }
-  if (value) {
-      Utils::OpenHandle(this)->set_has_external_resource(i::Smi::FromInt(1));
-  } else {
-      Utils::OpenHandle(this)->set_has_external_resource(Utils::OpenHandle(this)->GetHeap()->undefined_value());
-  }
-}
-
-
-void ObjectTemplate::MarkAsUseUserObjectComparison()
-{
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::ObjectTemplate::MarkAsUseUserObjectComparison()")) {
-    return;
-  }
-  ENTER_V8(isolate);
-  EnsureConstructor(this);
-  Utils::OpenHandle(this)->set_use_user_object_comparison(i::Smi::FromInt(1));
-}
-
 // --- S c r i p t D a t a ---
 
 
@@ -1554,8 +1486,7 @@ ScriptData* ScriptData::New(const char* data, int length) {
 Local<Script> Script::New(v8::Handle<String> source,
                           v8::ScriptOrigin* origin,
                           v8::ScriptData* pre_data,
-                          v8::Handle<String> script_data,
-                          v8::Script::CompileFlags compile_flags) {
+                          v8::Handle<String> script_data) {
   i::Isolate* isolate = i::Isolate::Current();
   ON_BAILOUT(isolate, "v8::Script::New()", return Local<Script>());
   LOG_API(isolate, "Script::New");
@@ -1596,8 +1527,7 @@ Local<Script> Script::New(v8::Handle<String> source,
                            NULL,
                            pre_data_impl,
                            Utils::OpenHandle(*script_data),
-                           i::NOT_NATIVES_CODE,
-                           compile_flags);
+                           i::NOT_NATIVES_CODE);
     has_pending_exception = result.is_null();
     EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
     raw_result = *result;
@@ -1608,23 +1538,21 @@ Local<Script> Script::New(v8::Handle<String> source,
 
 
 Local<Script> Script::New(v8::Handle<String> source,
-                          v8::Handle<Value> file_name,
-                          v8::Script::CompileFlags compile_flags) {
+                          v8::Handle<Value> file_name) {
   ScriptOrigin origin(file_name);
-  return New(source, &origin, 0, Handle<String>(), compile_flags);
+  return New(source, &origin);
 }
 
 
 Local<Script> Script::Compile(v8::Handle<String> source,
                               v8::ScriptOrigin* origin,
                               v8::ScriptData* pre_data,
-                              v8::Handle<String> script_data,
-                              v8::Script::CompileFlags compile_flags) {
+                              v8::Handle<String> script_data) {
   i::Isolate* isolate = i::Isolate::Current();
   ON_BAILOUT(isolate, "v8::Script::Compile()", return Local<Script>());
   LOG_API(isolate, "Script::Compile");
   ENTER_V8(isolate);
-  Local<Script> generic = New(source, origin, pre_data, script_data, compile_flags);
+  Local<Script> generic = New(source, origin, pre_data, script_data);
   if (generic.IsEmpty())
     return generic;
   i::Handle<i::Object> obj = Utils::OpenHandle(*generic);
@@ -1640,18 +1568,13 @@ Local<Script> Script::Compile(v8::Handle<String> source,
 
 Local<Script> Script::Compile(v8::Handle<String> source,
                               v8::Handle<Value> file_name,
-                              v8::Handle<String> script_data,
-                              v8::Script::CompileFlags compile_flags) {
+                              v8::Handle<String> script_data) {
   ScriptOrigin origin(file_name);
-  return Compile(source, &origin, 0, script_data, compile_flags);
+  return Compile(source, &origin, 0, script_data);
 }
 
 
 Local<Value> Script::Run() {
-    return Run(Handle<Object>());
-}
-
-Local<Value> Script::Run(Handle<Object> qml) {
   i::Isolate* isolate = i::Isolate::Current();
   ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>());
   LOG_API(isolate, "Script::Run");
@@ -1670,11 +1593,10 @@ Local<Value> Script::Run(Handle<Object> qml) {
       fun = i::Handle<i::JSFunction>(i::JSFunction::cast(*obj), isolate);
     }
     EXCEPTION_PREAMBLE(isolate);
-    i::Handle<i::Object> qmlglobal = Utils::OpenHandle(*qml);
     i::Handle<i::Object> receiver(
         isolate->context()->global_proxy(), isolate);
     i::Handle<i::Object> result =
-        i::Execution::Call(fun, receiver, 0, NULL, &has_pending_exception, false, qmlglobal);
+        i::Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
     EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
     raw_result = *result;
   }
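
With the qml parameter gone, Run() is again a plain call against the context's global proxy. The stock embedding sequence, as a sketch with error handling elided:

    v8::Handle<v8::Value> RunAnswer() {
      v8::HandleScope scope;
      v8::Persistent<v8::Context> context = v8::Context::New();
      v8::Context::Scope context_scope(context);
      v8::Handle<v8::Script> script = v8::Script::Compile(
          v8::String::New("6 * 7"), v8::String::New("answer.js"));
      v8::Handle<v8::Value> result = script->Run();
      context.Dispose();  // a real embedder would keep the context around
      return scope.Close(result);
    }
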
@@ -2179,17 +2101,21 @@ bool StackFrame::IsConstructor() const {
 
 // --- D a t a ---
 
-bool Value::IsUndefined() const {
+bool Value::FullIsUndefined() const {
   if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUndefined()")) {
     return false;
   }
-  return Utils::OpenHandle(this)->IsUndefined();
+  bool result = Utils::OpenHandle(this)->IsUndefined();
+  ASSERT_EQ(result, QuickIsUndefined());
+  return result;
 }
 
 
-bool Value::IsNull() const {
+bool Value::FullIsNull() const {
   if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNull()")) return false;
-  return Utils::OpenHandle(this)->IsNull();
+  bool result = Utils::OpenHandle(this)->IsNull();
+  ASSERT_EQ(result, QuickIsNull());
+  return result;
 }
 
 
@@ -2362,12 +2288,6 @@ bool Value::IsRegExp() const {
   return obj->IsJSRegExp();
 }
 
-bool Value::IsError() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsError()")) return false;
-  i::Handle<i::Object> obj = Utils::OpenHandle(this);
-  return obj->HasSpecificClassOf(HEAP->Error_symbol());
-}
-
 
 Local<String> Value::ToString() const {
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
@@ -2893,9 +2813,13 @@ bool v8::Object::ForceDelete(v8::Handle<Value> key) {
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
   i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
 
-  // When turning on access checks for a global object deoptimize all functions
-  // as optimized code does not always handle access checks.
-  i::Deoptimizer::DeoptimizeGlobalObject(*self);
+  // When deleting a property on the global object using ForceDelete,
+  // deoptimize all functions as optimized code does not check for the hole
+  // value with DontDelete properties.  We have to deoptimize all contexts
+  // because of possible cross-context inlined functions.
+  if (self->IsJSGlobalProxy() || self->IsGlobalObject()) {
+    i::Deoptimizer::DeoptimizeAll();
+  }
 
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> obj = i::ForceDeleteProperty(self, key_obj);
@@ -3888,59 +3812,6 @@ bool String::MayContainNonAscii() const {
 }
 
 
-uint32_t String::Hash() const {
-  i::Handle<i::String> str = Utils::OpenHandle(this);
-  if (IsDeadCheck(str->GetIsolate(), "v8::String::Hash()")) return 0;
-  return str->Hash();
-}
-
-
-String::CompleteHashData String::CompleteHash() const {
-  i::Handle<i::String> str = Utils::OpenHandle(this);
-  if (IsDeadCheck(str->GetIsolate(), "v8::String::CompleteHash()")) {
-    return CompleteHashData();
-  }
-  CompleteHashData result;
-  result.length = str->length();
-  result.hash = str->Hash();
-  if (str->IsSeqString())
-      result.symbol_id = i::SeqString::cast(*str)->symbol_id();
-  return result;
-}
-
-
-uint32_t String::ComputeHash(uint16_t *string, int length) {
-  return i::HashSequentialString<i::uc16>(string, length, i::kZeroHashSeed) >>
-      i::String::kHashShift;
-}
-
-
-uint32_t String::ComputeHash(char *string, int length) {
-  return i::HashSequentialString<char>(string, length, i::kZeroHashSeed) >>
-      i::String::kHashShift;
-}
-
-
-uint16_t String::GetCharacter(int index) {
-  i::Handle<i::String> str = Utils::OpenHandle(this);
-  return str->Get(index);
-}
-
-
-bool String::Equals(uint16_t *string, int length) {
-  i::Handle<i::String> str = Utils::OpenHandle(this);
-  if (IsDeadCheck(str->GetIsolate(), "v8::String::Equals()")) return 0;
-  return str->SlowEqualsExternal(string, length);
-}
-
-
-bool String::Equals(char *string, int length) {
-  i::Handle<i::String> str = Utils::OpenHandle(this);
-  if (IsDeadCheck(str->GetIsolate(), "v8::String::Equals()")) return 0;
-  return str->SlowEqualsExternal(string, length);
-}
-
-
 int String::WriteUtf8(char* buffer,
                       int capacity,
                       int* nchars_ref,
@@ -4313,34 +4184,6 @@ void v8::Object::SetPointerInInternalField(int index, void* value) {
 }
 
 
-void v8::Object::SetExternalResource(v8::Object::ExternalResource *resource) {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  ENTER_V8(isolate);
-  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
-  if (CanBeEncodedAsSmi(resource)) {
-    obj->SetExternalResourceObject(EncodeAsSmi(resource));
-  } else {
-    obj->SetExternalResourceObject(*isolate->factory()->NewForeign(static_cast<i::Address>((void *)resource)));
-  }
-  if (!obj->IsSymbol()) {
-    isolate->heap()->external_string_table()->AddObject(*obj);
-  }
-}
-
-
-v8::Object::ExternalResource *v8::Object::GetExternalResource() {
-  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
-  i::Object* value = obj->GetExternalResourceObject();
-  if (value->IsSmi()) {
-    return reinterpret_cast<v8::Object::ExternalResource*>(i::Internals::GetExternalPointerFromSmi(value));
-  } else if (value->IsForeign()) {
-    return reinterpret_cast<v8::Object::ExternalResource*>(i::Foreign::cast(value)->foreign_address());
-  } else {
-    return NULL;
-  }
-}
-
-
 // --- E n v i r o n m e n t ---
 
 
@@ -4607,37 +4450,6 @@ v8::Local<v8::Context> Context::GetCalling() {
 }
 
 
-v8::Local<v8::Object> Context::GetCallingQmlGlobal() {
-  i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Context::GetCallingQmlGlobal()")) {
-    return Local<Object>();
-  }
-
-  i::Context *context = isolate->context();
-  i::JavaScriptFrameIterator it;
-  if (it.done()) return Local<Object>();
-  context = i::Context::cast(it.frame()->context());
-  if (!context->qml_global()->IsUndefined()) {
-    i::Handle<i::Object> qmlglobal(context->qml_global());
-    return Utils::ToLocal(i::Handle<i::JSObject>::cast(qmlglobal));
-  } else {
-      return Local<Object>();
-  }
-}
-
-v8::Local<v8::Value> Context::GetCallingScriptData()
-{
-  i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Context::GetCallingScriptData()")) {
-    return Local<Object>();
-  }
-
-  i::JavaScriptFrameIterator it;
-  if (it.done()) return Local<Object>();
-  i::Handle<i::Script> script(i::Script::cast(i::JSFunction::cast(it.frame()->function())->shared()->script()));
-  return Utils::ToLocal(i::Handle<i::Object>(script->data()));
-}
-
 v8::Local<v8::Object> Context::Global() {
   if (IsDeadCheck(i::Isolate::Current(), "v8::Context::Global()")) {
     return Local<v8::Object>();
@@ -4818,7 +4630,9 @@ void* External::Value() const {
 
 Local<String> v8::String::Empty() {
   i::Isolate* isolate = i::Isolate::Current();
-  EnsureInitializedForIsolate(isolate, "v8::String::Empty()");
+  if (!EnsureInitializedForIsolate(isolate, "v8::String::Empty()")) {
+    return v8::Local<String>();
+  }
   LOG_API(isolate, "String::Empty()");
   return Utils::ToLocal(isolate->factory()->empty_symbol());
 }
@@ -5381,17 +5195,6 @@ void V8::SetFailedAccessCheckCallbackFunction(
   isolate->SetFailedAccessCheckCallback(callback);
 }
 
-
-void V8::SetUserObjectComparisonCallbackFunction(
-      UserObjectComparisonCallback callback) {
-  i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::V8::SetUserObjectComparisonCallbackFunction()")) {
-    return;
-  }
-  isolate->SetUserObjectComparisonCallback(callback);
-}
-
-
 void V8::AddObjectGroup(Persistent<Value>* objects,
                         size_t length,
                         RetainedObjectInfo* info) {
@@ -5415,7 +5218,7 @@ void V8::AddImplicitReferences(Persistent<Object> parent,
 }
 
 
-int V8::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
+intptr_t V8::AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes) {
   i::Isolate* isolate = i::Isolate::Current();
   if (IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) {
     return 0;
@@ -5595,17 +5398,6 @@ void Isolate::Exit() {
 }
 
 
-void Isolate::SetData(void* data) {
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  isolate->SetData(data);
-}
-
-void* Isolate::GetData() {
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  return isolate->GetData();
-}
-
-
 String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
     : str_(NULL), length_(0) {
   i::Isolate* isolate = i::Isolate::Current();
@@ -6205,7 +5997,7 @@ Handle<Value> HeapGraphEdge::GetName() const {
 const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapGraphEdge::GetFromNode");
-  const i::HeapEntry* from = ToInternal(this)->From();
+  const i::HeapEntry* from = ToInternal(this)->from();
   return reinterpret_cast<const HeapGraphNode*>(from);
 }
 
@@ -6271,7 +6063,7 @@ const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetChild");
   return reinterpret_cast<const HeapGraphEdge*>(
-      &ToInternal(this)->children()[index]);
+      ToInternal(this)->children()[index]);
 }
 
 
@@ -6365,7 +6157,7 @@ const HeapGraphNode* HeapSnapshot::GetNodeById(SnapshotObjectId id) const {
 int HeapSnapshot::GetNodesCount() const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount");
-  return ToInternal(this)->entries()->length();
+  return ToInternal(this)->entries().length();
 }
 
 
@@ -6373,7 +6165,7 @@ const HeapGraphNode* HeapSnapshot::GetNode(int index) const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode");
   return reinterpret_cast<const HeapGraphNode*>(
-      ToInternal(this)->entries()->at(index));
+      &ToInternal(this)->entries().at(index));
 }
 
 
@@ -6425,6 +6217,14 @@ const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
 }
 
 
+SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Value> value) {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotObjectId");
+  i::Handle<i::Object> obj = Utils::OpenHandle(*value);
+  return i::HeapProfiler::GetSnapshotObjectId(obj);
+}
+
+
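
GetSnapshotObjectId() maps a live handle onto the id it carries in snapshots and heap-stats updates; ids are stable for an object's lifetime, so they can be correlated across GCs. A hedged sketch:

    #include <v8-profiler.h>

    void CheckStableId(v8::Handle<v8::Value> value) {
      v8::SnapshotObjectId before =
          v8::HeapProfiler::GetSnapshotObjectId(value);
      v8::V8::LowMemoryNotification();  // provoke a full GC
      v8::SnapshotObjectId after =
          v8::HeapProfiler::GetSnapshotObjectId(value);
      // Expect before == after as long as |value| stayed alive.
    }
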
 const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
                                                HeapSnapshot::Type type,
                                                ActivityControl* control) {
@@ -6444,6 +6244,27 @@ const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
 }
 
 
+void HeapProfiler::StartHeapObjectsTracking() {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::HeapProfiler::StartHeapObjectsTracking");
+  i::HeapProfiler::StartHeapObjectsTracking();
+}
+
+
+void HeapProfiler::StopHeapObjectsTracking() {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::HeapProfiler::StopHeapObjectsTracking");
+  i::HeapProfiler::StopHeapObjectsTracking();
+}
+
+
+void HeapProfiler::PushHeapObjectsStats(OutputStream* stream) {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::HeapProfiler::PushHeapObjectsStats");
+  return i::HeapProfiler::PushHeapObjectsStats(stream);
+}
+
+
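
Together with the WriteHeapStatsChunk() hook added to OutputStream above, these three entry points form the push-model loop: start tracking, push deltas as often as needed, stop. A sketch reusing the FileStream from the earlier example:

    void TrackHeapObjects(FILE* out) {
      FileStream stream(out);  // FileStream as sketched above
      v8::HeapProfiler::StartHeapObjectsTracking();
      // ... run application code; each push emits only the stats
      // accumulated since the previous PushHeapObjectsStats() call.
      v8::HeapProfiler::PushHeapObjectsStats(&stream);
      v8::HeapProfiler::StopHeapObjectsTracking();
    }
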
 void HeapProfiler::DeleteAllSnapshots() {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapProfiler::DeleteAllSnapshots");
@@ -6491,7 +6312,11 @@ static void SetFlagsFromString(const char* flags) {
 
 void Testing::PrepareStressRun(int run) {
   static const char* kLazyOptimizations =
-      "--prepare-always-opt --nolimit-inlining --noalways-opt";
+      "--prepare-always-opt "
+      "--max-inlined-source-size=999999 "
+      "--max-inlined-nodes=999999 "
+      "--max-inlined-nodes-cumulative=999999 "
+      "--noalways-opt";
   static const char* kForcedOptimizations = "--always-opt";
 
   // If deoptimization is stressed, turn on frequent deoptimization. If no value
index 68579af..71c0e1c 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -40,14 +40,17 @@ class ImplementationUtilities {
   }
 
   // Packs additional parameters for the NewArguments function. |implicit_args|
-  // is a pointer to the last element of 3-elements array controlled by GC.
+  // is a pointer to the last element of a 4-element array controlled by the GC.
   static void PrepareArgumentsData(internal::Object** implicit_args,
+                                   internal::Isolate* isolate,
                                    internal::Object* data,
                                    internal::JSFunction* callee,
                                    internal::Object* holder) {
     implicit_args[v8::Arguments::kDataIndex] = data;
     implicit_args[v8::Arguments::kCalleeIndex] = callee;
     implicit_args[v8::Arguments::kHolderIndex] = holder;
+    implicit_args[v8::Arguments::kIsolateIndex] =
+        reinterpret_cast<internal::Object*>(isolate);
   }
 
   static v8::Arguments NewArguments(internal::Object** implicit_args,
@@ -55,6 +58,8 @@ class ImplementationUtilities {
                                     bool is_construct_call) {
     ASSERT(implicit_args[v8::Arguments::kCalleeIndex]->IsJSFunction());
     ASSERT(implicit_args[v8::Arguments::kHolderIndex]->IsHeapObject());
+    // The implicit isolate argument is not tagged and looks like a Smi.
+    ASSERT(implicit_args[v8::Arguments::kIsolateIndex]->IsSmi());
 
     return v8::Arguments(implicit_args, argv, argc, is_construct_call);
   }
index e9a3270..f8fb00c 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -91,9 +91,11 @@ class CustomArguments : public Relocatable {
                          Object* data,
                          Object* self,
                          JSObject* holder) : Relocatable(isolate) {
-    values_[2] = self;
-    values_[1] = holder;
-    values_[0] = data;
+    ASSERT(reinterpret_cast<Object*>(isolate)->IsSmi());
+    values_[3] = self;
+    values_[2] = holder;
+    values_[1] = data;
+    values_[0] = reinterpret_cast<Object*>(isolate);
   }
 
   inline explicit CustomArguments(Isolate* isolate) : Relocatable(isolate) {
@@ -106,8 +108,9 @@ class CustomArguments : public Relocatable {
 
   void IterateInstance(ObjectVisitor* v);
   Object** end() { return values_ + ARRAY_SIZE(values_) - 1; }
+
  private:
-  Object* values_[3];
+  Object* values_[4];
 };
 
 
index dd13f88..ad2ab7e 100644 (file)
@@ -169,10 +169,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
   __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
   __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
 
-  // Copy the qml global object from the surrounding context.
-  __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
-  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
-
   // Initialize the rest of the slots to undefined.
   __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
   for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
@@ -237,10 +233,6 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
   __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
   __ str(r2, ContextOperand(r0, Context::GLOBAL_INDEX));
 
-  // Copy the qml global object from the surrounding context.
-  __ ldr(r1, ContextOperand(cp, Context::QML_GLOBAL_INDEX));
-  __ str(r1, ContextOperand(r0, Context::QML_GLOBAL_INDEX));
-
   // Initialize the rest of the slots to the hole value.
   __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
   for (int i = 0; i < slots_; i++) {
@@ -1642,37 +1634,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
   // NOTICE! This code is only reached after a smi-fast-case check, so
   // it is certain that at least one operand isn't a smi.
 
-  {
-      Label not_user_equal, user_equal;
-      __ and_(r2, r1, Operand(r0));
-      __ tst(r2, Operand(kSmiTagMask));
-      __ b(eq, &not_user_equal);
-
-      __ CompareObjectType(r0, r2, r4, JS_OBJECT_TYPE);
-      __ b(ne, &not_user_equal);
-
-      __ CompareObjectType(r1, r3, r4, JS_OBJECT_TYPE);
-      __ b(ne, &not_user_equal);
-
-      __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
-      __ and_(r2, r2, Operand(1 << Map::kUseUserObjectComparison));
-      __ cmp(r2, Operand(1 << Map::kUseUserObjectComparison));
-      __ b(eq, &user_equal);
-
-      __ ldrb(r3, FieldMemOperand(r3, Map::kBitField2Offset));
-      __ and_(r3, r3, Operand(1 << Map::kUseUserObjectComparison));
-      __ cmp(r3, Operand(1 << Map::kUseUserObjectComparison));
-      __ b(ne, &not_user_equal);
-
-      __ bind(&user_equal);
-
-      __ Push(r0, r1);
-      __ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
-
-      __ bind(&not_user_equal);
-  }
-
-
   // Handle the case where the objects are identical.  Either returns the answer
   // or goes to slow.  Only falls through if the objects were not identical.
   EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
@@ -5208,9 +5169,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
     __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
     __ b(ne, &call);
     // Patch the receiver on the stack with the global receiver object.
-    __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
-    __ str(r2, MemOperand(sp, argc_ * kPointerSize));
+    __ ldr(r3, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+    __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset));
+    __ str(r3, MemOperand(sp, argc_ * kPointerSize));
     __ bind(&call);
   }
 
@@ -5218,9 +5179,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
   // r1: pushed function (to be verified)
   __ JumpIfSmi(r1, &non_function);
   // Get the map of the function object.
-  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+  __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
   __ b(ne, &slow);
 
+  if (RecordCallTarget()) {
+    GenerateRecordCallTarget(masm);
+  }
+
   // Fast-case: Invoke the function now.
   // r1: pushed function
   ParameterCount actual(argc_);
@@ -5244,8 +5209,17 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
 
   // Slow-case: Non-function called.
   __ bind(&slow);
+  if (RecordCallTarget()) {
+    // If there is a call target cache, mark it megamorphic in the
+    // non-function case.  MegamorphicSentinel is an immortal immovable
+    // object (undefined) so no write barrier is needed.
+    ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
+              masm->isolate()->heap()->undefined_value());
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+  }
   // Check for function proxy.
-  __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
   __ b(ne, &non_function);
   __ push(r1);  // put proxy as additional argument
   __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
@@ -5912,36 +5886,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
   // r2: result string length
   __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
   __ cmp(r2, Operand(r4, ASR, 1));
+  // Return original string.
   __ b(eq, &return_r0);
+  // Longer than original string's length or negative: unsafe arguments.
+  __ b(hi, &runtime);
+  // Shorter than original string's length: an actual substring.
 
-  Label result_longer_than_two;
-  // Check for special case of two character ASCII string, in which case
-  // we do a lookup in the symbol table first.
-  __ cmp(r2, Operand(2));
-  __ b(gt, &result_longer_than_two);
-  __ b(lt, &runtime);
-
-  __ JumpIfInstanceTypeIsNotSequentialAscii(r1, r1, &runtime);
-
-  // Get the two characters forming the sub string.
-  __ add(r0, r0, Operand(r3));
-  __ ldrb(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
-  __ ldrb(r4, FieldMemOperand(r0, SeqAsciiString::kHeaderSize + 1));
-
-  // Try to lookup two character string in symbol table.
-  Label make_two_character_string;
-  StringHelper::GenerateTwoCharacterSymbolTableProbe(
-      masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
-  __ jmp(&return_r0);
-
-  // r2: result string length.
-  // r3: two characters combined into halfword in little endian byte order.
-  __ bind(&make_two_character_string);
-  __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
-  __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
-  __ jmp(&return_r0);
-
-  __ bind(&result_longer_than_two);
   // Deal with different string types: update the index if necessary
   // and put the underlying string into r5.
   // r0: original string
@@ -6780,18 +6730,10 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
   __ and_(r2, r1, Operand(r0));
   __ JumpIfSmi(r2, &miss);
 
-  __ CompareObjectType(r0, r2, r3, JS_OBJECT_TYPE);
+  __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
   __ b(ne, &miss);
-  __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
-  __ and_(r2, r2, Operand(1 << Map::kUseUserObjectComparison));
-  __ cmp(r2, Operand(1 << Map::kUseUserObjectComparison));
-  __ b(eq, &miss);
-  __ CompareObjectType(r1, r2, r3, JS_OBJECT_TYPE);
+  __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
   __ b(ne, &miss);
-  __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
-  __ and_(r2, r2, Operand(1 << Map::kUseUserObjectComparison));
-  __ cmp(r2, Operand(1 << Map::kUseUserObjectComparison));
-  __ b(eq, &miss);
 
   ASSERT(GetCondition() == eq);
   __ sub(r0, r0, Operand(r1));
@@ -6810,16 +6752,8 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
   __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
   __ cmp(r2, Operand(known_map_));
   __ b(ne, &miss);
-  __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
-  __ and_(r2, r2, Operand(1 << Map::kUseUserObjectComparison));
-  __ cmp(r2, Operand(1 << Map::kUseUserObjectComparison));
-  __ b(eq, &miss);
   __ cmp(r3, Operand(known_map_));
   __ b(ne, &miss);
-  __ ldrb(r3, FieldMemOperand(r3, Map::kBitField2Offset));
-  __ and_(r3, r3, Operand(1 << Map::kUseUserObjectComparison));
-  __ cmp(r3, Operand(1 << Map::kUseUserObjectComparison));
-  __ b(eq, &miss);
 
   __ sub(r0, r0, Operand(r1));
   __ Ret();
index f7da6c3..7b08ed8 100644 (file)
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // CPU specific code for arm independent of OS goes here.
+#ifdef __arm__
+#include <sys/syscall.h>  // for cache flushing.
+#endif
 
 #include "v8.h"
 
-#if defined(__arm__)
-  #if !defined(__QNXNTO__)
-    #include <sys/syscall.h>  // for cache flushing.
-  #else
-    #include <sys/mman.h>  // for cache flushing.
-  #endif
-#endif
-
 #if defined(V8_TARGET_ARCH_ARM)
 
 #include "cpu.h"
@@ -69,10 +64,6 @@ void CPU::FlushICache(void* start, size_t size) {
   // None of this code ends up in the snapshot so there are no issues
   // around whether or not to generate the code when building snapshots.
   Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
-#elif defined(__QNXNTO__)
-  // The QNX kernel does not expose the symbol __ARM_NR_cacheflush so we
-  // use the msync system call instead of the approach used on Linux
-  msync(start, size, MS_SYNC|MS_INVALIDATE_ICACHE);
 #else
   // Ideally, we would call
   //   syscall(__ARM_NR_cacheflush, start,
index 96139a2..3e7a1e9 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -125,6 +125,8 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
                      Assembler::kDebugBreakSlotInstructions);
 }
 
+const bool Debug::FramePaddingLayout::kIsSupported = false;
+
 
 #define __ ACCESS_MASM(masm)
 
index 7b2a3c4..699e6aa 100644 (file)
@@ -457,6 +457,8 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
 
 void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
                                               int frame_index) {
+  Builtins* builtins = isolate_->builtins();
+  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
   JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
   unsigned height = iterator->Next();
   unsigned height_in_bytes = height * kPointerSize;
@@ -464,7 +466,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
     PrintF("  translating construct stub => height=%d\n", height_in_bytes);
   }
 
-  unsigned fixed_frame_size = 7 * kPointerSize;
+  unsigned fixed_frame_size = 8 * kPointerSize;
   unsigned output_frame_size = height_in_bytes + fixed_frame_size;
 
   // Allocate and store the output frame description.
@@ -529,6 +531,15 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
            top_address + output_offset, output_offset, value);
   }
 
+  // The output frame reflects a JSConstructStubGeneric frame.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(construct_stub);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; code object\n",
+           top_address + output_offset, output_offset, value);
+  }
+
   // Number of incoming arguments.
   output_offset -= kPointerSize;
   value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
@@ -559,8 +570,6 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
 
   ASSERT(0 == output_offset);
 
-  Builtins* builtins = isolate_->builtins();
-  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
   uint32_t pc = reinterpret_cast<uint32_t>(
       construct_stub->instruction_start() +
       isolate_->heap()->construct_stub_deopt_pc_offset()->value());
index 1eff534..3c8df29 100644 (file)
@@ -70,6 +70,7 @@ class JumpPatchSite BASE_EMBEDDED {
   // the inlined smi code.
   void EmitJumpIfNotSmi(Register reg, Label* target) {
     ASSERT(!patch_site_.is_bound() && !info_emitted_);
+    Assembler::BlockConstPoolScope block_const_pool(masm_);
     __ bind(&patch_site_);
     __ cmp(reg, Operand(reg));
     // Don't use b(al, ...) as that might emit the constant pool right after the
@@ -82,6 +83,7 @@ class JumpPatchSite BASE_EMBEDDED {
   // the inlined smi code.
   void EmitJumpIfSmi(Register reg, Label* target) {
     ASSERT(!patch_site_.is_bound() && !info_emitted_);
+    Assembler::BlockConstPoolScope block_const_pool(masm_);
     __ bind(&patch_site_);
     __ cmp(reg, Operand(reg));
     __ b(ne, target);  // Never taken before patched.
@@ -180,13 +182,12 @@ void FullCodeGenerator::Generate() {
 
   // Possibly allocate a local context.
   int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-  if (heap_slots > 0 ||
-      (scope()->is_qml_mode() && scope()->is_global_scope())) {
+  if (heap_slots > 0) {
     Comment cmnt(masm_, "[ Allocate local context");
     // Argument to NewContext is the function, which is in r1.
     __ push(r1);
     if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-      FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+      FastNewContextStub stub(heap_slots);
       __ CallStub(&stub);
     } else {
       __ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -267,11 +268,11 @@ void FullCodeGenerator::Generate() {
       // For named function expressions, declare the function name as a
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
-        VariableProxy* proxy = scope()->function();
-        ASSERT(proxy->var()->mode() == CONST ||
-               proxy->var()->mode() == CONST_HARMONY);
-        ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
-        EmitDeclaration(proxy, proxy->var()->mode(), NULL);
+        VariableDeclaration* function = scope()->function();
+        ASSERT(function->proxy()->var()->mode() == CONST ||
+               function->proxy()->var()->mode() == CONST_HARMONY);
+        ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+        VisitVariableDeclaration(function);
       }
       VisitDeclarations(scope()->declarations());
     }
@@ -781,62 +782,51 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
 }
 
 
-void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
-                                        VariableMode mode,
-                                        FunctionLiteral* function) {
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+  // The variable in the declaration always resides in the current function
+  // context.
+  ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+  if (FLAG_debug_code) {
+    // Check that we're not inside a with or catch context.
+    __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
+    __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
+    __ Check(ne, "Declaration in with context.");
+    __ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
+    __ Check(ne, "Declaration in catch context.");
+  }
+}
+
+
+void FullCodeGenerator::VisitVariableDeclaration(
+    VariableDeclaration* declaration) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
+  VariableProxy* proxy = declaration->proxy();
+  VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
-  bool binding_needs_init = (function == NULL) &&
-      (mode == CONST || mode == CONST_HARMONY || mode == LET);
+  bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
   switch (variable->location()) {
     case Variable::UNALLOCATED:
-      ++global_count_;
+      globals_->Add(variable->name());
+      globals_->Add(variable->binding_needs_init()
+                        ? isolate()->factory()->the_hole_value()
+                        : isolate()->factory()->undefined_value());
       break;
 
     case Variable::PARAMETER:
     case Variable::LOCAL:
-      if (function != NULL) {
-        Comment cmnt(masm_, "[ Declaration");
-        VisitForAccumulatorValue(function);
-        __ str(result_register(), StackOperand(variable));
-      } else if (binding_needs_init) {
-        Comment cmnt(masm_, "[ Declaration");
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
         __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
         __ str(ip, StackOperand(variable));
       }
       break;
 
     case Variable::CONTEXT:
-      // The variable in the decl always resides in the current function
-      // context.
-      ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
-      if (FLAG_debug_code) {
-        // Check that we're not inside a with or catch context.
-        __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
-        __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
-        __ Check(ne, "Declaration in with context.");
-        __ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
-        __ Check(ne, "Declaration in catch context.");
-      }
-      if (function != NULL) {
-        Comment cmnt(masm_, "[ Declaration");
-        VisitForAccumulatorValue(function);
-        __ str(result_register(), ContextOperand(cp, variable->index()));
-        int offset = Context::SlotOffset(variable->index());
-        // We know that we have written a function, which is not a smi.
-        __ RecordWriteContextSlot(cp,
-                                  offset,
-                                  result_register(),
-                                  r2,
-                                  kLRHasBeenSaved,
-                                  kDontSaveFPRegs,
-                                  EMIT_REMEMBERED_SET,
-                                  OMIT_SMI_CHECK);
-        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      } else if (binding_needs_init) {
-        Comment cmnt(masm_, "[ Declaration");
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
+        EmitDebugCheckDeclarationContext(variable);
         __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
         __ str(ip, ContextOperand(cp, variable->index()));
         // No write barrier since the_hole_value is in old space.
@@ -845,13 +835,11 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
       break;
 
     case Variable::LOOKUP: {
-      Comment cmnt(masm_, "[ Declaration");
+      Comment cmnt(masm_, "[ VariableDeclaration");
       __ mov(r2, Operand(variable->name()));
       // Declaration nodes are always introduced in one of four modes.
-      ASSERT(mode == VAR ||
-             mode == CONST ||
-             mode == CONST_HARMONY ||
-             mode == LET);
+      ASSERT(mode == VAR || mode == LET ||
+             mode == CONST || mode == CONST_HARMONY);
       PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
           ? READ_ONLY : NONE;
       __ mov(r1, Operand(Smi::FromInt(attr)));
@@ -859,11 +847,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
       // Note: For variables we must not push an initial value (such as
       // 'undefined') because we may have a (legal) redeclaration and we
       // must not destroy the current value.
-      if (function != NULL) {
-        __ Push(cp, r2, r1);
-        // Push initial value for function declaration.
-        VisitForStackValue(function);
-      } else if (binding_needs_init) {
+      if (hole_init) {
         __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
         __ Push(cp, r2, r1, r0);
       } else {
@@ -877,6 +861,122 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
 }
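
The hole-initialization pattern above is what makes use-before-initialization of let/const bindings detectable at runtime: the binding is created holding a sentinel "hole" value, and loads check for it before handing the value out. A minimal sketch of that protocol in C++, using a hypothetical binding record rather than V8's actual heap representation:

    #include <stdexcept>

    // Hypothetical binding record; V8 stores the hole as a distinguished
    // heap value (kTheHoleValueRootIndex above), modeled here as a flag.
    struct Binding {
      bool is_hole = true;  // what the declaration code establishes
      int value = 0;        // set by the binding's initializer later
    };

    // A load of a let/const binding checks for the hole and throws on
    // use-before-initialization; var bindings skip hole-init entirely.
    int LoadBinding(const Binding& b) {
      if (b.is_hole) throw std::runtime_error("use before initialization");
      return b.value;
    }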
 
 
+void FullCodeGenerator::VisitFunctionDeclaration(
+    FunctionDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      globals_->Add(variable->name());
+      Handle<SharedFunctionInfo> function =
+          Compiler::BuildFunctionInfo(declaration->fun(), script());
+      // Check for stack-overflow exception.
+      if (function.is_null()) return SetStackOverflow();
+      globals_->Add(function);
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      VisitForAccumulatorValue(declaration->fun());
+      __ str(result_register(), StackOperand(variable));
+      break;
+    }
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      VisitForAccumulatorValue(declaration->fun());
+      __ str(result_register(), ContextOperand(cp, variable->index()));
+      int offset = Context::SlotOffset(variable->index());
+      // We know that we have written a function, which is not a smi.
+      __ RecordWriteContextSlot(cp,
+                                offset,
+                                result_register(),
+                                r2,
+                                kLRHasBeenSaved,
+                                kDontSaveFPRegs,
+                                EMIT_REMEMBERED_SET,
+                                OMIT_SMI_CHECK);
+      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      break;
+    }
+
+    case Variable::LOOKUP: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      __ mov(r2, Operand(variable->name()));
+      __ mov(r1, Operand(Smi::FromInt(NONE)));
+      __ Push(cp, r2, r1);
+      // Push initial value for function declaration.
+      VisitForStackValue(declaration->fun());
+      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+      break;
+    }
+  }
+}
+
+
+void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  Handle<JSModule> instance = declaration->module()->interface()->Instance();
+  ASSERT(!instance.is_null());
+
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      Comment cmnt(masm_, "[ ModuleDeclaration");
+      globals_->Add(variable->name());
+      globals_->Add(instance);
+      Visit(declaration->module());
+      break;
+    }
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ ModuleDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      __ mov(r1, Operand(instance));
+      __ str(r1, ContextOperand(cp, variable->index()));
+      Visit(declaration->module());
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::LOOKUP:
+      UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED:
+      // TODO(rossberg)
+      break;
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ ImportDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      // TODO(rossberg)
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::LOOKUP:
+      UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
+  // TODO(rossberg)
+}
+
+
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   // The context is the first argument.
@@ -1230,7 +1330,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
     __ bind(&fast);
   }
 
-  __ ldr(r0, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+  __ ldr(r0, GlobalObjectOperand());
   __ mov(r2, Operand(var->name()));
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
       ? RelocInfo::CODE_TARGET
@@ -1317,7 +1417,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
       Comment cmnt(masm_, "Global variable");
       // Use inline caching. Variable name is passed in r2 and the global
       // object (receiver) in r0.
-      __ ldr(r0, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+      __ ldr(r0, GlobalObjectOperand());
       __ mov(r2, Operand(var->name()));
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
       CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
@@ -1984,7 +2084,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
   if (var->IsUnallocated()) {
     // Global var, const, or let.
     __ mov(r2, Operand(var->name()));
-    __ ldr(r1, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+    __ ldr(r1, GlobalObjectOperand());
     Handle<Code> ic = is_classic_mode()
         ? isolate()->builtins()->StoreIC_Initialize()
         : isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2263,6 +2363,18 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
+
+  // Record call targets in unoptimized code, but not in the snapshot.
+  if (!Serializer::enabled()) {
+    flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
+    Handle<Object> uninitialized =
+        TypeFeedbackCells::UninitializedSentinel(isolate());
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+    RecordTypeFeedbackCell(expr->id(), cell);
+    __ mov(r2, Operand(cell));
+  }
+
   CallFunctionStub stub(arg_count, flags);
   __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ CallStub(&stub);
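
The cell seeded above implements a small call-target feedback protocol: the call stub replaces the uninitialized sentinel with the first callee it observes, so the optimizing compiler can later inline call sites whose cell is still monomorphic. A rough sketch of that state machine, with illustrative names rather than V8's API:

    // Hypothetical feedback cell: starts uninitialized, records the first
    // observed callee, and collapses to a "megamorphic" marker when later
    // calls disagree.
    struct FeedbackCell { const void* value; };

    const void* const kUninitializedSentinel = 0;
    const void* const kMegamorphicSentinel = reinterpret_cast<const void*>(1);

    void RecordCallTarget(FeedbackCell* cell, const void* callee) {
      if (cell->value == kUninitializedSentinel) {
        cell->value = callee;                // first call: monomorphic
      } else if (cell->value != callee) {
        cell->value = kMegamorphicSentinel;  // conflicting targets seen
      }
    }
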
@@ -2294,12 +2406,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
   __ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
   __ push(r1);
 
-  // Push the qml mode flag.
-  __ mov(r1, Operand(Smi::FromInt(is_qml_mode())));
-  __ push(r1);
-
   // Do the runtime call.
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
 }
 
 
@@ -2356,7 +2464,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
     context()->DropAndPlug(1, r0);
   } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
     // Push global object as receiver for the call IC.
-    __ ldr(r0, proxy->var()->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+    __ ldr(r0, GlobalObjectOperand());
     __ push(r0);
     EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
   } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -3358,104 +3466,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
 }
 
 
-void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 3);
-  VisitForStackValue(args->at(0));
-  VisitForStackValue(args->at(1));
-  VisitForStackValue(args->at(2));
-  Label done;
-  Label slow_case;
-  Register object = r0;
-  Register index1 = r1;
-  Register index2 = r2;
-  Register elements = r3;
-  Register scratch1 = r4;
-  Register scratch2 = r5;
-
-  __ ldr(object, MemOperand(sp, 2 * kPointerSize));
-  // Fetch the map and check if array is in fast case.
-  // Check that object doesn't require security checks and
-  // has no indexed interceptor.
-  __ CompareObjectType(object, scratch1, scratch2, JS_ARRAY_TYPE);
-  __ b(ne, &slow_case);
-  // Map is now in scratch1.
-
-  __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
-  __ tst(scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
-  __ b(ne, &slow_case);
-
-  // Check the object's elements are in fast case and writable.
-  __ ldr(elements, FieldMemOperand(object, JSObject::kElementsOffset));
-  __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-  __ cmp(scratch1, ip);
-  __ b(ne, &slow_case);
-
-  // Check that both indices are smis.
-  __ ldr(index1, MemOperand(sp, 1 * kPointerSize));
-  __ ldr(index2, MemOperand(sp, 0));
-  __ JumpIfNotBothSmi(index1, index2, &slow_case);
-
-  // Check that both indices are valid.
-  __ ldr(scratch1, FieldMemOperand(object, JSArray::kLengthOffset));
-  __ cmp(scratch1, index1);
-  __ cmp(scratch1, index2, hi);
-  __ b(ls, &slow_case);
-
-  // Bring the address of the elements into index1 and index2.
-  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(index1,
-         scratch1,
-         Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ add(index2,
-         scratch1,
-         Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize));
-
-  // Swap elements.
-  __ ldr(scratch1, MemOperand(index1, 0));
-  __ ldr(scratch2, MemOperand(index2, 0));
-  __ str(scratch1, MemOperand(index2, 0));
-  __ str(scratch2, MemOperand(index1, 0));
-
-  Label no_remembered_set;
-  __ CheckPageFlag(elements,
-                   scratch1,
-                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                   ne,
-                   &no_remembered_set);
-  // Possible optimization: do a check that both values are Smis
-  // (or them and test against Smi mask.)
-
-  // We are swapping two objects in an array and the incremental marker never
-  // pauses in the middle of scanning a single object.  Therefore the
-  // incremental marker is not disturbed, so we don't need to call the
-  // RecordWrite stub that notifies the incremental marker.
-  __ RememberedSetHelper(elements,
-                         index1,
-                         scratch2,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
-  __ RememberedSetHelper(elements,
-                         index2,
-                         scratch2,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
-
-  __ bind(&no_remembered_set);
-  // We are done. Drop elements from the stack, and return undefined.
-  __ Drop(3);
-  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-  __ jmp(&done);
-
-  __ bind(&slow_case);
-  __ CallRuntime(Runtime::kSwapElements, 3);
-
-  __ bind(&done);
-  context()->Plug(r0);
-}
-
-
 void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
@@ -3878,7 +3888,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
         // but "delete this" is allowed.
         ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
         if (var->IsUnallocated()) {
-          __ ldr(r2, var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+          __ ldr(r2, GlobalObjectOperand());
           __ mov(r1, Operand(var->name()));
           __ mov(r0, Operand(Smi::FromInt(kNonStrictMode)));
           __ Push(r2, r1, r0);
@@ -4181,7 +4191,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
   VariableProxy* proxy = expr->AsVariableProxy();
   if (proxy != NULL && proxy->var()->IsUnallocated()) {
     Comment cmnt(masm_, "Global variable");
-    __ ldr(r0, proxy->var()->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+    __ ldr(r0, GlobalObjectOperand());
     __ mov(r2, Operand(proxy->name()));
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
@@ -4451,7 +4461,8 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
 
 void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
   Scope* declaration_scope = scope()->DeclarationScope();
-  if (declaration_scope->is_global_scope()) {
+  if (declaration_scope->is_global_scope() ||
+      declaration_scope->is_module_scope()) {
     // Contexts nested in the global context have a canonical empty function
     // as their closure, not the anonymous closure containing the global
     // code.  Pass a smi sentinel and let the runtime look up the empty
index e843657..c12c167 100644
@@ -774,7 +774,7 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
   __ b(lt, slow_case);
 
   // Check that the key is a positive smi.
-  __ tst(key, Operand(0x8000001));
+  __ tst(key, Operand(0x80000001));
   __ b(ne, slow_case);
 
   // Load the elements into scratch1 and check its map.
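
The corrected mask is worth spelling out: with 32-bit smi tagging, bit 0 is the tag (clear for a smi) and bit 31 is the sign, so a single tst against 0x80000001 rejects non-smis and negative keys at once. The equivalent check in plain C++, with the constants written out for illustration:

    #include <cstdint>

    const uint32_t kSmiTagMask = 0x1;          // bit 0: clear for a smi
    const uint32_t kSmiSignMask = 0x80000000;  // bit 31: set when negative

    // Equivalent of `tst key, #0x80000001; b ne, slow_case`: the key is a
    // usable index only if it is both a smi and non-negative.
    bool IsNonNegativeSmi(uint32_t key) {
      return (key & (kSmiSignMask | kSmiTagMask)) == 0;
    }
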
@@ -1690,12 +1690,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
 
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address());
+    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
   }
 }
 
 
-void PatchInlinedSmiCode(Address address) {
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   Address cmp_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
 
@@ -1729,34 +1729,31 @@ void PatchInlinedSmiCode(Address address) {
   Instr instr_at_patch = Assembler::instr_at(patch_address);
   Instr branch_instr =
       Assembler::instr_at(patch_address + Instruction::kInstrSize);
-  ASSERT(Assembler::IsCmpRegister(instr_at_patch));
-  ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
-            Assembler::GetRm(instr_at_patch).code());
+  // This is patching a conditional "jump if not smi/jump if smi" site.
+  // Enabling the check changes
+  //   cmp rx, rx
+  //   b eq/ne, <target>
+  // to
+  //   tst rx, #kSmiTagMask
+  //   b ne/eq, <target>
+  // and disabling it changes the code back again.
+  CodePatcher patcher(patch_address, 2);
+  Register reg = Assembler::GetRn(instr_at_patch);
+  if (check == ENABLE_INLINED_SMI_CHECK) {
+    ASSERT(Assembler::IsCmpRegister(instr_at_patch));
+    ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
+              Assembler::GetRm(instr_at_patch).code());
+    patcher.masm()->tst(reg, Operand(kSmiTagMask));
+  } else {
+    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
+    ASSERT(Assembler::IsTstImmediate(instr_at_patch));
+    patcher.masm()->cmp(reg, reg);
+  }
   ASSERT(Assembler::IsBranch(branch_instr));
   if (Assembler::GetCondition(branch_instr) == eq) {
-    // This is patching a "jump if not smi" site to be active.
-    // Changing
-    //   cmp rx, rx
-    //   b eq, <target>
-    // to
-    //   tst rx, #kSmiTagMask
-    //   b ne, <target>
-    CodePatcher patcher(patch_address, 2);
-    Register reg = Assembler::GetRn(instr_at_patch);
-    patcher.masm()->tst(reg, Operand(kSmiTagMask));
     patcher.EmitCondition(ne);
   } else {
     ASSERT(Assembler::GetCondition(branch_instr) == ne);
-    // This is patching a "jump if smi" site to be active.
-    // Changing
-    //   cmp rx, rx
-    //   b ne, <target>
-    // to
-    //   tst rx, #kSmiTagMask
-    //   b eq, <target>
-    CodePatcher patcher(patch_address, 2);
-    Register reg = Assembler::GetRn(instr_at_patch);
-    patcher.masm()->tst(reg, Operand(kSmiTagMask));
     patcher.EmitCondition(eq);
   }
 }
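
In terms of what the patched site computes, the two states behave as follows; this is a sketch with invented helper names, not V8 code:

    #include <cstdint>

    const uint32_t kSmiTagMask = 0x1;  // bit 0 clear means "rx holds a smi"

    // Disabled site: `cmp rx, rx` always sets Z, so the guarded branch is
    // effectively unconditional and the inlined smi fast path never runs.
    bool DisabledSiteSetsZ(uint32_t rx) {
      (void)rx;
      return true;  // rx == rx
    }

    // Enabled site: `tst rx, #kSmiTagMask` sets Z exactly when rx is a smi;
    // flipping the branch condition (eq <-> ne) keeps the targets valid.
    bool EnabledSiteSetsZ(uint32_t rx) {
      return (rx & kSmiTagMask) == 0;
    }
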
index 31fd4ad..5c60f53 100644
@@ -108,22 +108,17 @@ void LInstruction::PrintTo(StringStream* stream) {
 }
 
 
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+void LInstruction::PrintDataTo(StringStream* stream) {
   stream->Add("= ");
-  for (int i = 0; i < inputs_.length(); i++) {
+  for (int i = 0; i < InputCount(); i++) {
     if (i > 0) stream->Add(" ");
-    inputs_[i]->PrintTo(stream);
+    InputAt(i)->PrintTo(stream);
   }
 }
 
 
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
-  for (int i = 0; i < results_.length(); i++) {
-    if (i > 0) stream->Add(" ");
-    results_[i]->PrintTo(stream);
-  }
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
 }
 
 
@@ -732,22 +727,6 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
 }
 
 
-LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
-    LInstruction* instr, int ast_id) {
-  ASSERT(instruction_pending_deoptimization_environment_ == NULL);
-  ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
-  instruction_pending_deoptimization_environment_ = instr;
-  pending_deoptimization_ast_id_ = ast_id;
-  return instr;
-}
-
-
-void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
-  instruction_pending_deoptimization_environment_ = NULL;
-  pending_deoptimization_ast_id_ = AstNode::kNoNumber;
-}
-
-
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
@@ -760,8 +739,10 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
   if (hinstr->HasObservableSideEffects()) {
     ASSERT(hinstr->next()->IsSimulate());
     HSimulate* sim = HSimulate::cast(hinstr->next());
-    instr = SetInstructionPendingDeoptimizationEnvironment(
-        instr, sim->ast_id());
+    ASSERT(instruction_pending_deoptimization_environment_ == NULL);
+    ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+    instruction_pending_deoptimization_environment_ = instr;
+    pending_deoptimization_ast_id_ = sim->ast_id();
   }
 
   // If instruction does not have side-effects lazy deoptimization
@@ -779,12 +760,6 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
 }
 
 
-LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
-  instr->MarkAsSaveDoubles();
-  return instr;
-}
-
-
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
   ASSERT(!instr->HasPointerMap());
   instr->set_pointer_map(new(zone()) LPointerMap(position_));
@@ -1151,7 +1126,7 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
 
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LGlobalObject(context, instr->qml_global()));
+  return DefineAsRegister(new(zone()) LGlobalObject(context));
 }
 
 
@@ -1224,7 +1199,7 @@ LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
 
 LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(instr->qml_global()), r0), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, r0), instr);
 }
 
 
@@ -1295,6 +1270,7 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
 LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
   ASSERT(instr->value()->representation().IsInteger32());
   ASSERT(instr->representation().IsInteger32());
+  if (instr->HasNoUses()) return NULL;
   LOperand* value = UseRegisterAtStart(instr->value());
   return DefineAsRegister(new(zone()) LBitNotI(value));
 }
@@ -1319,6 +1295,75 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
 }
 
 
+bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
+  uint32_t divisor_abs = abs(divisor);
+  // Dividing by 0, 1, and powers of 2 is easy.
+  // Note that IsPowerOf2(0) returns true.
+  ASSERT(IsPowerOf2(0) == true);
+  if (IsPowerOf2(divisor_abs)) return true;
+
+  // We have magic numbers for a few specific divisors.
+  // Details and proofs can be found in:
+  // - Hacker's Delight, Henry S. Warren, Jr.
+  // - The PowerPC Compiler Writer’s Guide
+  // and probably many others.
+  //
+  // We handle
+  //   <divisor with magic numbers> * <power of 2>
+  // but not
+  //   <divisor with magic numbers> * <other divisor with magic numbers>
+  int32_t power_of_2_factor =
+    CompilerIntrinsics::CountTrailingZeros(divisor_abs);
+  DivMagicNumbers magic_numbers =
+    DivMagicNumberFor(divisor_abs >> power_of_2_factor);
+  if (magic_numbers.M != InvalidDivMagicNumber.M) return true;
+
+  return false;
+}
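
For divisors that are not powers of two, the (multiplier, shift) pairs come from the standard construction in the references above. As a concrete illustration, assuming the well-known 32-bit magic pair for dividing by 3 (M = 0x55555556, s = 0), the whole division collapses to a multiply-high plus a sign correction; this mirrors the smull/asr/add sequence the code generator emits and is not code from this patch:

    #include <cassert>
    #include <cstdint>

    int32_t DivideBy3(int32_t n) {
      int64_t product = static_cast<int64_t>(n) * 0x55555556LL;
      int32_t hi = static_cast<int32_t>(product >> 32);  // smull's high word
      // Add 1 for negative dividends so the result rounds toward zero.
      return hi + static_cast<int32_t>(static_cast<uint32_t>(n) >> 31);
    }

    int main() {
      assert(DivideBy3(7) == 2);
      assert(DivideBy3(-7) == -2);
      assert(DivideBy3(9) == 3);
      return 0;
    }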
+
+
+HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
+  // A value with an integer representation does not need to be transformed.
+  if (dividend->representation().IsInteger32()) {
+    return dividend;
+  } else if (dividend->IsChange() &&
+             HChange::cast(dividend)->from().IsInteger32()) {
+    // A change from an integer32 can be replaced by the integer32 value.
+    return HChange::cast(dividend)->value();
+  }
+  return NULL;
+}
+
+
+HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
+  // Only optimize when we have magic numbers for the divisor.
+  // The standard integer division routine is usually slower than
+  // transitioning to VFP.
+  if (divisor->IsConstant() &&
+      HConstant::cast(divisor)->HasInteger32Value()) {
+    HConstant* constant_val = HConstant::cast(divisor);
+    int32_t int32_val = constant_val->Integer32Value();
+    if (LChunkBuilder::HasMagicNumberForDivisor(int32_val)) {
+      return constant_val->CopyToRepresentation(Representation::Integer32());
+    }
+  }
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+  HValue* right = instr->right();
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegisterOrConstant(right);
+  LOperand* remainder = TempRegister();
+  ASSERT(right->IsConstant() &&
+         HConstant::cast(right)->HasInteger32Value() &&
+         HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value()));
+  return AssignEnvironment(DefineAsRegister(
+      new LMathFloorOfDiv(dividend, divisor, remainder)));
+}
+
+
 LInstruction* LChunkBuilder::DoMod(HMod* instr) {
   if (instr->representation().IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
@@ -2242,9 +2287,12 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
   if (pending_deoptimization_ast_id_ == instr->ast_id()) {
     LInstruction* result = new(zone()) LLazyBailout;
     result = AssignEnvironment(result);
+    // Store the lazy deopt environment with the instruction if needed. Right
+    // now it is only used for LInstanceOfKnownGlobal.
     instruction_pending_deoptimization_environment_->
-        set_deoptimization_environment(result->environment());
-    ClearInstructionPendingDeoptimizationEnvironment();
+        SetDeferredLazyDeoptimizationEnvironment(result->environment());
+    instruction_pending_deoptimization_environment_ = NULL;
+    pending_deoptimization_ast_id_ = AstNode::kNoNumber;
     return result;
   }
 
@@ -2271,8 +2319,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
                                                undefined,
                                                instr->call_kind(),
                                                instr->is_construct());
-  if (instr->arguments() != NULL) {
-    inner->Bind(instr->arguments(), graph()->GetArgumentsObject());
+  if (instr->arguments_var() != NULL) {
+    inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
   }
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
@@ -2281,10 +2329,21 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
 
 
 LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  LInstruction* pop = NULL;
+
+  HEnvironment* env = current_block_->last_environment();
+
+  if (instr->arguments_pushed()) {
+    int argument_count = env->arguments_environment()->parameter_count();
+    pop = new(zone()) LDrop(argument_count);
+    argument_count_ -= argument_count;
+  }
+
   HEnvironment* outer = current_block_->last_environment()->
       DiscardInlined(false);
   current_block_->UpdateEnvironment(outer);
-  return NULL;
+
+  return pop;
 }
 
 
index 30a7bf5..dbae813 100644
@@ -132,6 +132,7 @@ class LCodeGen;
   V(LoadNamedField)                             \
   V(LoadNamedFieldPolymorphic)                  \
   V(LoadNamedGeneric)                           \
+  V(MathFloorOfDiv)                             \
   V(ModI)                                       \
   V(MulI)                                       \
   V(NumberTagD)                                 \
@@ -179,7 +180,8 @@ class LCodeGen;
   V(CheckMapValue)                              \
   V(LoadFieldByIndex)                           \
   V(DateField)                                  \
-  V(WrapReceiver)
+  V(WrapReceiver)                               \
+  V(Drop)
 
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
@@ -203,15 +205,14 @@ class LInstruction: public ZoneObject {
   LInstruction()
       :  environment_(NULL),
          hydrogen_value_(NULL),
-         is_call_(false),
-         is_save_doubles_(false) { }
+         is_call_(false) { }
   virtual ~LInstruction() { }
 
   virtual void CompileToNative(LCodeGen* generator) = 0;
   virtual const char* Mnemonic() const = 0;
   virtual void PrintTo(StringStream* stream);
-  virtual void PrintDataTo(StringStream* stream) = 0;
-  virtual void PrintOutputOperandTo(StringStream* stream) = 0;
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
 
   enum Opcode {
     // Declare a unique enum value for each instruction.
@@ -246,22 +247,12 @@ class LInstruction: public ZoneObject {
   void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
   HValue* hydrogen_value() const { return hydrogen_value_; }
 
-  void set_deoptimization_environment(LEnvironment* env) {
-    deoptimization_environment_.set(env);
-  }
-  LEnvironment* deoptimization_environment() const {
-    return deoptimization_environment_.get();
-  }
-  bool HasDeoptimizationEnvironment() const {
-    return deoptimization_environment_.is_set();
-  }
+  virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
 
   void MarkAsCall() { is_call_ = true; }
-  void MarkAsSaveDoubles() { is_save_doubles_ = true; }
 
   // Interface to the register allocator and iterators.
   bool IsMarkedAsCall() const { return is_call_; }
-  bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
 
   virtual bool HasResult() const = 0;
   virtual LOperand* result() = 0;
@@ -282,9 +273,7 @@ class LInstruction: public ZoneObject {
   LEnvironment* environment_;
   SetOncePointer<LPointerMap> pointer_map_;
   HValue* hydrogen_value_;
-  SetOncePointer<LEnvironment> deoptimization_environment_;
   bool is_call_;
-  bool is_save_doubles_;
 };
 
 
@@ -306,9 +295,6 @@ class LTemplateInstruction: public LInstruction {
   int TempCount() { return T; }
   LOperand* TempAt(int i) { return temps_[i]; }
 
-  virtual void PrintDataTo(StringStream* stream);
-  virtual void PrintOutputOperandTo(StringStream* stream);
-
  protected:
   EmbeddedContainer<LOperand*, R> results_;
   EmbeddedContainer<LOperand*, I> inputs_;
@@ -534,9 +520,8 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
 
 class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
  public:
-  LArgumentsElements() { }
-
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
 };
 
 
@@ -582,6 +567,21 @@ class LDivI: public LTemplateInstruction<1, 2, 0> {
 };
 
 
+class LMathFloorOfDiv: public LTemplateInstruction<1, 2, 1> {
+ public:
+  LMathFloorOfDiv(LOperand* left,
+                  LOperand* right,
+                  LOperand* temp = NULL) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
 class LMulI: public LTemplateInstruction<1, 2, 1> {
  public:
   LMulI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -834,6 +834,15 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
   DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
 
   Handle<JSFunction> function() const { return hydrogen()->function(); }
+  LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
+    return lazy_deopt_env_;
+  }
+  virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
+    lazy_deopt_env_ = env;
+  }
+
+ private:
+  LEnvironment* lazy_deopt_env_;
 };
 
 
@@ -1227,6 +1236,7 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
 
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
@@ -1243,13 +1253,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
 
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
 class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
  public:
-  LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
-                                    LOperand* key) {
+  LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
     inputs_[0] = external_pointer;
     inputs_[1] = key;
   }
@@ -1263,6 +1273,7 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
   ElementsKind elements_kind() const {
     return hydrogen()->elements_kind();
   }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
@@ -1378,6 +1389,19 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
 };
 
 
+class LDrop: public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LDrop(int count) : count_(count) { }
+
+  int count() const { return count_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+  int count_;
+};
+
+
 class LThisFunction: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
@@ -1412,17 +1436,13 @@ class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
 
 class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
  public:
-  explicit LGlobalObject(LOperand* context, bool qml_global) {
+  explicit LGlobalObject(LOperand* context) {
     inputs_[0] = context;
-    qml_global_ = qml_global;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
 
   LOperand* context() { return InputAt(0); }
-  bool qml_global() { return qml_global_; }
- private:
-  bool qml_global_;
 };
 
 
@@ -1464,6 +1484,7 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
   virtual void PrintDataTo(StringStream* stream);
 
   int arity() const { return hydrogen()->argument_count() - 1; }
+  Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
 };
 
 
@@ -1514,16 +1535,10 @@ class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
   DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
 
-  explicit LCallGlobal(bool qml_global) : qml_global_(qml_global) {}
-
   virtual void PrintDataTo(StringStream* stream);
 
   Handle<String> name() const {return hydrogen()->name(); }
   int arity() const { return hydrogen()->argument_count() - 1; }
-
-  bool qml_global() { return qml_global_; }
- private:
-  bool qml_global_;
 };
 
 
@@ -1727,6 +1742,7 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
   LOperand* object() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
@@ -1749,6 +1765,9 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
+
+  bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
 };
 
 
@@ -1791,6 +1810,7 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
   ElementsKind elements_kind() const {
     return hydrogen()->elements_kind();
   }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
@@ -2284,6 +2304,10 @@ class LChunkBuilder BASE_EMBEDDED {
   HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
 #undef DECLARE_DO
 
+  static bool HasMagicNumberForDivisor(int32_t divisor);
+  static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
+  static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
+
  private:
   enum Status {
     UNUSED,
@@ -2379,11 +2403,6 @@ class LChunkBuilder BASE_EMBEDDED {
       LInstruction* instr,
       HInstruction* hinstr,
       CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-  LInstruction* MarkAsSaveDoubles(LInstruction* instr);
-
-  LInstruction* SetInstructionPendingDeoptimizationEnvironment(
-      LInstruction* instr, int ast_id);
-  void ClearInstructionPendingDeoptimizationEnvironment();
 
   LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
                                   int* argument_index_accumulator);
index 045814f..d224d24 100644
@@ -174,13 +174,12 @@ bool LCodeGen::GeneratePrologue() {
 
   // Possibly allocate a local context.
   int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-  if (heap_slots > 0 ||
-      (scope()->is_qml_mode() && scope()->is_global_scope())) {
+  if (heap_slots > 0) {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is in r1.
     __ push(r1);
     if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-      FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+      FastNewContextStub stub(heap_slots);
       __ CallStub(&stub);
     } else {
       __ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -1035,6 +1034,100 @@ void LCodeGen::DoModI(LModI* instr) {
 }
 
 
+void LCodeGen::EmitSignedIntegerDivisionByConstant(
+    Register result,
+    Register dividend,
+    int32_t divisor,
+    Register remainder,
+    Register scratch,
+    LEnvironment* environment) {
+  ASSERT(!AreAliased(dividend, scratch, ip));
+  ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
+
+  uint32_t divisor_abs = abs(divisor);
+
+  int32_t power_of_2_factor =
+    CompilerIntrinsics::CountTrailingZeros(divisor_abs);
+
+  switch (divisor_abs) {
+    case 0:
+      DeoptimizeIf(al, environment);
+      return;
+
+    case 1:
+      if (divisor > 0) {
+        __ Move(result, dividend);
+      } else {
+        __ rsb(result, dividend, Operand(0), SetCC);
+        DeoptimizeIf(vs, environment);
+      }
+      // Compute the remainder.
+      __ mov(remainder, Operand(0));
+      return;
+
+    default:
+      if (IsPowerOf2(divisor_abs)) {
+        // Branch and condition free code for integer division by a power
+        // of two.
+        int32_t power = WhichPowerOf2(divisor_abs);
+        if (power > 1) {
+          __ mov(scratch, Operand(dividend, ASR, power - 1));
+        } else {
+          // ASR #0 is not encodable, so for power == 1 the bias is taken
+          // from the dividend itself.
+          __ Move(scratch, dividend);
+        }
+        __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
+        __ mov(result, Operand(scratch, ASR, power));
+        // Negate if necessary.
+        // We don't need to check for overflow because the case '-1' is
+        // handled separately.
+        if (divisor < 0) {
+          ASSERT(divisor != -1);
+          __ rsb(result, result, Operand(0));
+        }
+        // Compute the remainder.
+        if (divisor > 0) {
+          __ sub(remainder, dividend, Operand(result, LSL, power));
+        } else {
+          __ add(remainder, dividend, Operand(result, LSL, power));
+        }
+        return;
+      } else {
+        // Use magic numbers for a few specific divisors.
+        // Details and proofs can be found in:
+        // - Hacker's Delight, Henry S. Warren, Jr.
+        // - The PowerPC Compiler Writer’s Guide
+        // and probably many others.
+        //
+        // We handle
+        //   <divisor with magic numbers> * <power of 2>
+        // but not
+        //   <divisor with magic numbers> * <other divisor with magic numbers>
+        DivMagicNumbers magic_numbers =
+          DivMagicNumberFor(divisor_abs >> power_of_2_factor);
+        // Branch and condition free code for integer division using the
+        // magic multiply-high followed by a shift.
+        const int32_t M = magic_numbers.M;
+        const int32_t s = magic_numbers.s + power_of_2_factor;
+
+        __ mov(ip, Operand(M));
+        __ smull(ip, scratch, dividend, ip);
+        if (M < 0) {
+          __ add(scratch, scratch, Operand(dividend));
+        }
+        if (s > 0) {
+          __ mov(scratch, Operand(scratch, ASR, s));
+        }
+        __ add(result, scratch, Operand(dividend, LSR, 31));
+        if (divisor < 0) __ rsb(result, result, Operand(0));
+        // Compute the remainder.
+        __ mov(ip, Operand(divisor));
+        // This sequence could be replaced with 'mls' when
+        // it gets implemented.
+        __ mul(scratch, result, ip);
+        __ sub(remainder, dividend, scratch);
+      }
+  }
+}
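
The power-of-two path above is the usual bias-then-shift trick: add 2^power - 1 to a negative dividend so the arithmetic shift rounds toward zero instead of toward minus infinity. The same computation in portable C++ (a sketch; it assumes `>>` on a negative int32_t is an arithmetic shift, as it is in the emitted ARM code):

    #include <cassert>
    #include <cstdint>

    // Truncating division by 2^power, 1 <= power <= 31.
    int32_t DivideByPowerOf2(int32_t n, int power) {
      uint32_t sign = static_cast<uint32_t>(n >> 31);  // 0 or 0xFFFFFFFF
      int32_t biased = n + static_cast<int32_t>(sign >> (32 - power));
      return biased >> power;
    }

    int main() {
      assert(DivideByPowerOf2(7, 2) == 1);    //  7 / 4
      assert(DivideByPowerOf2(-7, 2) == -1);  // -7 / 4, toward zero
      assert(DivideByPowerOf2(-8, 3) == -1);  // -8 / 8
      return 0;
    }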
+
+
 void LCodeGen::DoDivI(LDivI* instr) {
   class DeferredDivI: public LDeferredCode {
    public:
@@ -1116,6 +1209,34 @@ void LCodeGen::DoDivI(LDivI* instr) {
 }
 
 
+void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
+  const Register result = ToRegister(instr->result());
+  const Register left = ToRegister(instr->InputAt(0));
+  const Register remainder = ToRegister(instr->TempAt(0));
+  const Register scratch = scratch0();
+
+  // We only optimize this for division by constants, because the standard
+  // integer division routine is usually slower than transitioning to VFP.
+  // This could be optimized on processors with SDIV available.
+  ASSERT(instr->InputAt(1)->IsConstantOperand());
+  int32_t divisor = ToInteger32(LConstantOperand::cast(instr->InputAt(1)));
+  if (divisor < 0) {
+    __ cmp(left, Operand(0));
+    DeoptimizeIf(eq, instr->environment());
+  }
+  EmitSignedIntegerDivisionByConstant(result,
+                                      left,
+                                      divisor,
+                                      remainder,
+                                      scratch,
+                                      instr->environment());
+  // We performed a truncating division. Correct the result if necessary.
+  __ cmp(remainder, Operand(0));
+  __ teq(remainder, Operand(divisor), ne);
+  __ sub(result, result, Operand(1), LeaveCC, mi);
+}
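
The cmp/teq/sub tail converts the truncating quotient into a flooring one: subtract 1 exactly when the remainder is non-zero and its sign differs from the divisor's (teq leaves the sign of `remainder ^ divisor` in the N flag, which the conditional sub tests with `mi`). The same correction in C++:

    #include <cassert>
    #include <cstdint>

    int32_t FloorDiv(int32_t n, int32_t d) {
      int32_t q = n / d;                    // truncates toward zero
      int32_t r = n - q * d;                // remainder, sign follows n
      if (r != 0 && ((r ^ d) < 0)) q -= 1;  // signs differ: round down
      return q;
    }

    int main() {
      assert(FloorDiv(7, 2) == 3);
      assert(FloorDiv(-7, 2) == -4);  // truncation alone would give -3
      assert(FloorDiv(7, -2) == -4);
      return 0;
    }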
+
+
 template<int T>
 void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
                                       Token::Value op) {
@@ -2268,8 +2389,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                   RelocInfo::CODE_TARGET,
                   instr,
                   RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-  ASSERT(instr->HasDeoptimizationEnvironment());
-  LEnvironment* env = instr->deoptimization_environment();
+  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   // Put the result value into the result register slot and
   // restore all registers.
@@ -2467,42 +2587,38 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
   Register object = ToRegister(instr->object());
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
+
   int map_count = instr->hydrogen()->types()->length();
+  bool need_generic = instr->hydrogen()->need_generic();
+
+  if (map_count == 0 && !need_generic) {
+    DeoptimizeIf(al, instr->environment());
+    return;
+  }
   Handle<String> name = instr->hydrogen()->name();
-  if (map_count == 0) {
-    ASSERT(instr->hydrogen()->need_generic());
-    __ mov(r2, Operand(name));
-    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    Label done;
-    __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
-    for (int i = 0; i < map_count - 1; ++i) {
-      Handle<Map> map = instr->hydrogen()->types()->at(i);
+  Label done;
+  __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  for (int i = 0; i < map_count; ++i) {
+    bool last = (i == map_count - 1);
+    Handle<Map> map = instr->hydrogen()->types()->at(i);
+    __ cmp(scratch, Operand(map));
+    if (last && !need_generic) {
+      DeoptimizeIf(ne, instr->environment());
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+    } else {
       Label next;
-      __ cmp(scratch, Operand(map));
       __ b(ne, &next);
       EmitLoadFieldOrConstantFunction(result, object, map, name);
       __ b(&done);
       __ bind(&next);
     }
-    Handle<Map> map = instr->hydrogen()->types()->last();
-    __ cmp(scratch, Operand(map));
-    if (instr->hydrogen()->need_generic()) {
-      Label generic;
-      __ b(ne, &generic);
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-      __ b(&done);
-      __ bind(&generic);
-      __ mov(r2, Operand(name));
-      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      CallCode(ic, RelocInfo::CODE_TARGET, instr);
-    } else {
-      DeoptimizeIf(ne, instr->environment());
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-    }
-    __ bind(&done);
   }
+  if (need_generic) {
+    __ mov(r2, Operand(name));
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  }
+  __ bind(&done);
 }
 
 
@@ -2628,7 +2744,9 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
 
   // Load the result.
   __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
-  __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+  uint32_t offset = FixedArray::kHeaderSize +
+                    (instr->additional_index() << kPointerSizeLog2);
+  __ ldr(result, FieldMemOperand(scratch, offset));
 
   // Check for the hole value.
   if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2660,13 +2778,14 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
   }
 
   Operand operand = key_is_constant
-      ? Operand(constant_key * (1 << shift_size) +
+      ? Operand(((constant_key + instr->additional_index()) << shift_size) +
                 FixedDoubleArray::kHeaderSize - kHeapObjectTag)
       : Operand(key, LSL, shift_size);
   __ add(elements, elements, operand);
   if (!key_is_constant) {
     __ add(elements, elements,
-           Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+           Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
+                   (instr->additional_index() << shift_size)));
   }
 
   __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
@@ -2693,26 +2812,33 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     key = ToRegister(instr->key());
   }
   int shift_size = ElementsKindToShiftSize(elements_kind);
+  int additional_offset = instr->additional_index() << shift_size;
 
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     CpuFeatures::Scope scope(VFP3);
     DwVfpRegister result = ToDoubleRegister(instr->result());
     Operand operand = key_is_constant
-        ? Operand(constant_key * (1 << shift_size))
+        ? Operand(constant_key << shift_size)
         : Operand(key, LSL, shift_size);
     __ add(scratch0(), external_pointer, operand);
     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-      __ vldr(result.low(), scratch0(), 0);
+      __ vldr(result.low(), scratch0(), additional_offset);
       __ vcvt_f64_f32(result, result.low());
     } else  {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ vldr(result, scratch0(), 0);
+      __ vldr(result, scratch0(), additional_offset);
     }
   } else {
     Register result = ToRegister(instr->result());
+    if (instr->additional_index() != 0 && !key_is_constant) {
+      __ add(scratch0(), key, Operand(instr->additional_index()));
+    }
     MemOperand mem_operand(key_is_constant
-        ? MemOperand(external_pointer, constant_key * (1 << shift_size))
-        : MemOperand(external_pointer, key, LSL, shift_size));
+        ? MemOperand(external_pointer,
+                     (constant_key << shift_size) + additional_offset)
+        : (instr->additional_index() == 0
+           ? MemOperand(external_pointer, key, LSL, shift_size)
+           : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
     switch (elements_kind) {
       case EXTERNAL_BYTE_ELEMENTS:
         __ ldrsb(result, mem_operand);
@@ -2765,16 +2891,20 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   Register scratch = scratch0();
   Register result = ToRegister(instr->result());
 
-  // Check if the calling frame is an arguments adaptor frame.
-  Label done, adapted;
-  __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
-  __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  if (instr->hydrogen()->from_inlined()) {
+    __ sub(result, sp, Operand(2 * kPointerSize));
+  } else {
+    // Check if the calling frame is an arguments adaptor frame.
+    Label done, adapted;
+    __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+    __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
 
-  // Result is the frame pointer for the frame if not adapted and for the real
-  // frame below the adaptor frame if adapted.
-  __ mov(result, fp, LeaveCC, ne);
-  __ mov(result, scratch, LeaveCC, eq);
+    // Result is the frame pointer for the frame if not adapted and for the
+    // real frame below the adaptor frame if adapted.
+    __ mov(result, fp, LeaveCC, ne);
+    __ mov(result, scratch, LeaveCC, eq);
+  }
 }
 
 
@@ -2883,7 +3013,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   __ b(ne, &loop);
 
   __ bind(&invoke);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
@@ -2908,6 +3038,11 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
 }
 
 
+void LCodeGen::DoDrop(LDrop* instr) {
+  __ Drop(instr->count());
+}
+
+
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
   __ LoadHeapObject(result, instr->hydrogen()->closure());
@@ -2940,7 +3075,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
 
 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
   Register result = ToRegister(instr->result());
-  __ ldr(result, ContextOperand(cp, instr->qml_global()?Context::QML_GLOBAL_INDEX:Context::GLOBAL_INDEX));
+  __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
 }
 
 
@@ -2954,7 +3089,8 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int arity,
                                  LInstruction* instr,
-                                 CallKind call_kind) {
+                                 CallKind call_kind,
+                                 R1State r1_state) {
   bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
       function->shared()->formal_parameter_count() == arity;
 
@@ -2962,7 +3098,10 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   RecordPosition(pointers->position());
 
   if (can_invoke_directly) {
-    __ LoadHeapObject(r1, function);
+    if (r1_state == R1_UNINITIALIZED) {
+      __ LoadHeapObject(r1, function);
+    }
+
     // Change context if needed.
     bool change_context =
         (info()->closure()->context() != function->context()) ||
@@ -3001,7 +3140,8 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   CallKnownFunction(instr->function(),
                     instr->arity(),
                     instr,
-                    CALL_AS_METHOD);
+                    CALL_AS_METHOD,
+                    R1_UNINITIALIZED);
 }
 
 
@@ -3425,13 +3565,21 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   ASSERT(ToRegister(instr->function()).is(r1));
   ASSERT(instr->HasPointerMap());
-  ASSERT(instr->HasDeoptimizationEnvironment());
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
-  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-  ParameterCount count(instr->arity());
-  __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  if (instr->known_function().is_null()) {
+    LPointerMap* pointers = instr->pointer_map();
+    RecordPosition(pointers->position());
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(instr->arity());
+    __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  } else {
+    CallKnownFunction(instr->known_function(),
+                      instr->arity(),
+                      instr,
+                      CALL_AS_METHOD,
+                      R1_CONTAINS_TARGET);
+  }
 }
 
 
@@ -3486,7 +3634,11 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
-  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
+  CallKnownFunction(instr->target(),
+                    instr->arity(),
+                    instr,
+                    CALL_AS_FUNCTION,
+                    R1_UNINITIALIZED);
 }
 
 
@@ -3584,10 +3736,16 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
     int offset =
-        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+        (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
+        + FixedArray::kHeaderSize;
     __ str(value, FieldMemOperand(elements, offset));
   } else {
     __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
+    if (instr->additional_index() != 0) {
+      __ add(scratch,
+             scratch,
+             Operand(instr->additional_index() << kPointerSizeLog2));
+    }
     __ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
   }
 
@@ -3616,7 +3774,6 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
   Register scratch = scratch0();
   bool key_is_constant = instr->key()->IsConstantOperand();
   int constant_key = 0;
-  Label not_nan;
 
   // Calculate the effective address of the slot in the array to store the
   // double value.
@@ -3630,7 +3787,7 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
   }
   int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   Operand operand = key_is_constant
-      ? Operand(constant_key * (1 << shift_size) +
+      ? Operand((constant_key << shift_size) +
                 FixedDoubleArray::kHeaderSize - kHeapObjectTag)
       : Operand(key, LSL, shift_size);
   __ add(scratch, elements, operand);
@@ -3639,14 +3796,16 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
   }
 
-  // Check for NaN. All NaNs must be canonicalized.
-  __ VFPCompareAndSetFlags(value, value);
-
-  // Only load canonical NaN if the comparison above set the overflow.
-  __ Vmov(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double(), vs);
+  if (instr->NeedsCanonicalization()) {
+    // Check for NaN. All NaNs must be canonicalized.
+    __ VFPCompareAndSetFlags(value, value);
+    // Only load canonical NaN if the comparison above set the overflow.
+    __ Vmov(value,
+            FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
+            vs);
+  }
 
-  __ bind(&not_nan);
-  __ vstr(value, scratch, 0);
+  __ vstr(value, scratch, instr->additional_index() << shift_size);
 }
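
The self-comparison above is the standard NaN probe: a VFP compare of a register against itself is unordered only for NaN, which sets the V flag, and only then is the canonical NaN pattern loaded before the store. The same idea in C++; the bit pattern shown is an illustrative quiet NaN, not necessarily V8's exact constant:

    #include <cstdint>
    #include <cstring>

    double CanonicalizeNaN(double value) {
      if (value != value) {  // unordered self-compare: value is NaN
        const uint64_t kCanonicalNaN = 0x7FF8000000000000ULL;
        std::memcpy(&value, &kCanonicalNaN, sizeof(value));
      }
      return value;
    }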
 
 
@@ -3667,25 +3826,33 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
     key = ToRegister(instr->key());
   }
   int shift_size = ElementsKindToShiftSize(elements_kind);
+  int additional_offset = instr->additional_index() << shift_size;
 
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     CpuFeatures::Scope scope(VFP3);
     DwVfpRegister value(ToDoubleRegister(instr->value()));
-    Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
+    Operand operand(key_is_constant ? Operand(constant_key << shift_size)
                                     : Operand(key, LSL, shift_size));
     __ add(scratch0(), external_pointer, operand);
     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
       __ vcvt_f32_f64(double_scratch0().low(), value);
-      __ vstr(double_scratch0().low(), scratch0(), 0);
+      __ vstr(double_scratch0().low(), scratch0(), additional_offset);
     } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ vstr(value, scratch0(), 0);
+      __ vstr(value, scratch0(), additional_offset);
     }
   } else {
     Register value(ToRegister(instr->value()));
+    if (instr->additional_index() != 0 && !key_is_constant) {
+      __ add(scratch0(), key, Operand(instr->additional_index()));
+    }
     MemOperand mem_operand(key_is_constant
-        ? MemOperand(external_pointer, constant_key * (1 << shift_size))
-        : MemOperand(external_pointer, key, LSL, shift_size));
+        ? MemOperand(external_pointer,
+                     ((constant_key + instr->additional_index())
+                         << shift_size))
+        : (instr->additional_index() == 0
+            ? MemOperand(external_pointer, key, LSL, shift_size)
+            : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
     switch (elements_kind) {
       case EXTERNAL_PIXEL_ELEMENTS:
       case EXTERNAL_BYTE_ELEMENTS:
@@ -4650,9 +4817,10 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
         __ str(r2, FieldMemOperand(result, total_offset + 4));
       }
     } else if (elements->IsFixedArray()) {
+      Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
       for (int i = 0; i < elements_length; i++) {
         int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
-        Handle<Object> value = JSObject::GetElement(object, i);
+        Handle<Object> value(fast_elements->get(i));
         if (value->IsJSObject()) {
           Handle<JSObject> value_object = Handle<JSObject>::cast(value);
           __ add(r2, result, Operand(*offset));
@@ -4676,6 +4844,23 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
 
 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
   int size = instr->hydrogen()->total_size();
+  ElementsKind boilerplate_elements_kind =
+      instr->hydrogen()->boilerplate()->GetElementsKind();
+
+  // Deopt if the literal boilerplate's ElementsKind differs from the
+  // expected one. The check isn't necessary if the boilerplate has already
+  // been converted to FAST_ELEMENTS.
+  if (boilerplate_elements_kind != FAST_ELEMENTS) {
+    __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
+    // Load map into r2.
+    __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+    // Load the map's "bit field 2".
+    __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
+    // Retrieve elements_kind from bit field 2.
+    __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
+    __ cmp(r2, Operand(boilerplate_elements_kind));
+    DeoptimizeIf(ne, instr->environment());
+  }
 
   // Allocate all objects that are part of the literal in one big
   // allocation. This avoids multiple limit checks.
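
The ldrb/ubfx pair above reads the elements kind straight out of the map's
bit field 2; in C++ terms an unsigned bitfield extract is a shift and mask.
The shift and width constants below are placeholders standing in for
Map::kElementsKindShift and Map::kElementsKindBitCount, not V8's actual
values:

    #include <cassert>
    #include <cstdint>

    static const int kElementsKindShift = 3;     // placeholder
    static const int kElementsKindBitCount = 5;  // placeholder

    static int ExtractElementsKind(uint8_t bit_field2) {
      // Equivalent of ubfx dst, src, #shift, #width.
      return (bit_field2 >> kElementsKindShift) &
             ((1 << kElementsKindBitCount) - 1);
    }

    int main() {
      uint8_t bit_field2 = 2 << kElementsKindShift;  // encode kind 2
      assert(ExtractElementsKind(bit_field2) == 2);
      return 0;
    }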
@@ -4971,7 +5156,7 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
   Register strict = scratch0();
   __ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
   __ Push(object, key, strict);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
@@ -4984,7 +5169,7 @@ void LCodeGen::DoIn(LIn* instr) {
   Register obj = ToRegister(instr->object());
   Register key = ToRegister(instr->key());
   __ Push(key, obj);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
index adb6e1b..c6a3af7 100644 (file)
@@ -215,12 +215,18 @@ class LCodeGen BASE_EMBEDDED {
                                int argc,
                                LInstruction* instr);
 
+  enum R1State {
+    R1_UNINITIALIZED,
+    R1_CONTAINS_TARGET
+  };
+
   // Generate a direct call to a known function.  Expects the function
   // to be in r1.
   void CallKnownFunction(Handle<JSFunction> function,
                          int arity,
                          LInstruction* instr,
-                         CallKind call_kind);
+                         CallKind call_kind,
+                         R1State r1_state);
 
   void LoadHeapObject(Register result, Handle<HeapObject> object);
 
@@ -317,6 +323,17 @@ class LCodeGen BASE_EMBEDDED {
                     Register source,
                     int* offset);
 
+  // Emit optimized code for integer division.
+  // Inputs are signed.
+  // All registers are clobbered.
+  // If 'remainder' is no_reg, it is not computed.
+  void EmitSignedIntegerDivisionByConstant(Register result,
+                                           Register dividend,
+                                           int32_t divisor,
+                                           Register remainder,
+                                           Register scratch,
+                                           LEnvironment* environment);
+
   struct JumpTableEntry {
     explicit inline JumpTableEntry(Address entry)
         : label(),
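
Whatever instruction sequence EmitSignedIntegerDivisionByConstant ends up
emitting, it has to agree with truncating signed division: dividend ==
quotient * divisor + remainder, with the remainder taking the dividend's
sign. A reference model of that contract (not the optimized codegen):

    #include <cassert>
    #include <cstdint>

    static void SignedDivMod(int32_t dividend, int32_t divisor,
                             int32_t* quotient, int32_t* remainder) {
      *quotient = dividend / divisor;               // truncates toward zero
      *remainder = dividend - *quotient * divisor;  // keeps dividend's sign
    }

    int main() {
      int32_t q, r;
      SignedDivMod(-7, 3, &q, &r);
      assert(q == -2 && r == -1);
      assert(-7 == q * 3 + r);
      return 0;
    }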
index 857c2bf..4da2fec 100644 (file)
@@ -3710,22 +3710,35 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
 }
 
 
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
-  if (r1.is(r2)) return true;
-  if (r1.is(r3)) return true;
-  if (r1.is(r4)) return true;
-  if (r2.is(r3)) return true;
-  if (r2.is(r4)) return true;
-  if (r3.is(r4)) return true;
-  return false;
+#ifdef DEBUG
+bool AreAliased(Register reg1,
+                Register reg2,
+                Register reg3,
+                Register reg4,
+                Register reg5,
+                Register reg6) {
+  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
+    reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();
+
+  RegList regs = 0;
+  if (reg1.is_valid()) regs |= reg1.bit();
+  if (reg2.is_valid()) regs |= reg2.bit();
+  if (reg3.is_valid()) regs |= reg3.bit();
+  if (reg4.is_valid()) regs |= reg4.bit();
+  if (reg5.is_valid()) regs |= reg5.bit();
+  if (reg6.is_valid()) regs |= reg6.bit();
+  int n_of_non_aliasing_regs = NumRegs(regs);
+
+  return n_of_valid_regs != n_of_non_aliasing_regs;
 }
+#endif
 
 
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
       size_(instructions * Assembler::kInstrSize),
-      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
+      masm_(NULL, address, size_ + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
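
The rewritten AreAliased above ors each valid register's bit into a RegList
and compares the population count against the number of valid registers: any
alias collapses two bits into one and the counts diverge. The trick in
isolation, with plain ints as register codes and -1 standing in for no_reg:

    #include <cassert>
    #include <cstdint>

    static int PopCount(uint32_t v) {
      int n = 0;
      for (; v != 0; v &= v - 1) n++;  // clears the lowest set bit
      return n;
    }

    static bool AreAliased(const int* codes, int count) {
      int n_of_valid = 0;
      uint32_t mask = 0;
      for (int i = 0; i < count; i++) {
        if (codes[i] < 0) continue;  // skip no_reg
        n_of_valid++;
        mask |= 1u << codes[i];
      }
      return PopCount(mask) != n_of_valid;
    }

    int main() {
      int distinct[] = {0, 1, 2, -1};
      int aliased[] = {0, 1, 1, -1};
      assert(!AreAliased(distinct, 4));
      assert(AreAliased(aliased, 4));
      return 0;
    }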
index 751a81c..360f4c1 100644 (file)
@@ -85,7 +85,14 @@ enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
 enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
 
 
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+#ifdef DEBUG
+bool AreAliased(Register reg1,
+                Register reg2,
+                Register reg3 = no_reg,
+                Register reg4 = no_reg,
+                Register reg5 = no_reg,
+                Register reg6 = no_reg);
+#endif
 
 
 // MacroAssembler implements a collection of frequently used macros.
@@ -1321,7 +1328,6 @@ class MacroAssembler: public Assembler {
 };
 
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
 // The code patcher is used to patch (typically) small parts of code e.g. for
 // debugging and other types of instrumentation. When using the code patcher
 // the exact number of bytes specified must be emitted. It is not legal to emit
@@ -1351,7 +1357,6 @@ class CodePatcher {
   int size_;  // Number of bytes of the expected patch size.
   MacroAssembler masm_;  // Macro assembler used to generate the code.
 };
-#endif  // ENABLE_DEBUGGER_SUPPORT
 
 
 // -----------------------------------------------------------------------------
@@ -1367,11 +1372,6 @@ inline MemOperand GlobalObjectOperand()  {
 }
 
 
-static inline MemOperand QmlGlobalObjectOperand()  {
-  return ContextOperand(cp, Context::QML_GLOBAL_INDEX);
-}
-
-
 #ifdef GENERATED_CODE_COVERAGE
 #define CODE_COVERAGE_STRINGIFY(x) #x
 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
index cfd93bc..49c0982 100644 (file)
@@ -443,8 +443,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
   Label exit;
 
   // Check that the map of the object hasn't changed.
+  CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
+                                             : REQUIRE_EXACT_MAP;
   __ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
-              DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+              DO_SMI_CHECK, mode);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -580,6 +582,8 @@ static void PushInterceptorArguments(MacroAssembler* masm,
   __ push(holder);
   __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
   __ push(scratch);
+  __ mov(scratch, Operand(ExternalReference::isolate_address()));
+  __ push(scratch);
 }
 
 
@@ -594,7 +598,7 @@ static void CompileCallLoadPropertyWithInterceptor(
   ExternalReference ref =
       ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
                         masm->isolate());
-  __ mov(r0, Operand(5));
+  __ mov(r0, Operand(6));
   __ mov(r1, Operand(ref));
 
   CEntryStub stub(1);
@@ -602,9 +606,9 @@ static void CompileCallLoadPropertyWithInterceptor(
 }
 
 
-static const int kFastApiCallArguments = 3;
+static const int kFastApiCallArguments = 4;
 
-// Reserves space for the extra arguments to FastHandleApiCall in the
+// Reserves space for the extra arguments to the API function in the
 // caller's frame.
 //
 // These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
@@ -630,7 +634,8 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
   //  -- sp[0]              : holder (set by CheckPrototypes)
   //  -- sp[4]              : callee JS function
   //  -- sp[8]              : call data
-  //  -- sp[12]             : last JS argument
+  //  -- sp[12]             : isolate
+  //  -- sp[16]             : last JS argument
   //  -- ...
   //  -- sp[(argc + 3) * 4] : first JS argument
   //  -- sp[(argc + 4) * 4] : receiver
@@ -640,7 +645,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
   __ LoadHeapObject(r5, function);
   __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
 
-  // Pass the additional arguments FastHandleApiCall expects.
+  // Pass the additional arguments.
   Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
   Handle<Object> call_data(api_call_info->data());
   if (masm->isolate()->heap()->InNewSpace(*call_data)) {
@@ -649,13 +654,15 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
   } else {
     __ Move(r6, call_data);
   }
-  // Store JS function and call data.
-  __ stm(ib, sp, r5.bit() | r6.bit());
+  __ mov(r7, Operand(ExternalReference::isolate_address()));
+  // Store JS function, call data and isolate.
+  __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit());
 
-  // r2 points to call data as expected by Arguments
-  // (refer to layout above).
-  __ add(r2, sp, Operand(2 * kPointerSize));
+  // Prepare arguments.
+  __ add(r2, sp, Operand(3 * kPointerSize));
 
+  // Allocate the v8::Arguments structure in the arguments' space since
+  // it's not controlled by GC.
   const int kApiStackSpace = 4;
 
   FrameScope frame_scope(masm, StackFrame::MANUAL);
@@ -664,9 +671,9 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
   // r0 = v8::Arguments&
   // Arguments is after the return address.
   __ add(r0, sp, Operand(1 * kPointerSize));
-  // v8::Arguments::implicit_args = data
+  // v8::Arguments::implicit_args_
   __ str(r2, MemOperand(r0, 0 * kPointerSize));
-  // v8::Arguments::values = last argument
+  // v8::Arguments::values_
   __ add(ip, r2, Operand(argc * kPointerSize));
   __ str(ip, MemOperand(r0, 1 * kPointerSize));
   // v8::Arguments::length_ = argc
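
With the isolate pushed as a fourth implicit slot, every offset in the stack
layout comment above shifts by one pointer. A quick check of the arithmetic
for 32-bit ARM (kPointerSize == 4):

    #include <cassert>

    static const int kPointerSize = 4;

    int main() {
      int argc = 2;  // example argument count
      int isolate = 3 * kPointerSize;      // sp[12], the new slot
      int last_js_arg = 4 * kPointerSize;  // sp[16], pushed down one slot
      int first_js_arg = (argc + 3) * kPointerSize;
      int receiver = (argc + 4) * kPointerSize;
      assert(isolate == 12 && last_js_arg == 16);
      assert(first_js_arg - last_js_arg == (argc - 1) * kPointerSize);
      assert(receiver - first_js_arg == kPointerSize);
      return 0;
    }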
@@ -843,7 +850,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
     __ CallExternalReference(
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
                           masm->isolate()),
-        5);
+        6);
     // Restore the name_ register.
     __ pop(name_);
     // Leave the internal frame.
@@ -1202,7 +1209,9 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
   } else {
     __ Move(scratch3, Handle<Object>(callback->data()));
   }
-  __ Push(reg, scratch3, name_reg);
+  __ Push(reg, scratch3);
+  __ mov(scratch3, Operand(ExternalReference::isolate_address()));
+  __ Push(scratch3, name_reg);
   __ mov(r0, sp);  // r0 = Handle<String>
 
   const int kApiStackSpace = 1;
@@ -1214,7 +1223,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
   __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
   __ add(r1, sp, Operand(1 * kPointerSize));  // r1 = AccessorInfo&
 
-  const int kStackUnwindSpace = 4;
+  const int kStackUnwindSpace = 5;
   Address getter_address = v8::ToCData<Address>(callback->getter());
   ApiFunction fun(getter_address);
   ExternalReference ref =
@@ -1264,12 +1273,19 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
                                           name, miss);
     ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
 
+    // Preserve the receiver register explicitly whenever it differs from the
+    // holder and will be needed if the interceptor returns without a result:
+    // the CALLBACKS case needs the receiver to be passed into C++ code, and
+    // the FIELD case might cause a miss during the prototype check.
+    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
+    bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
+        (lookup->type() == CALLBACKS || must_perform_prototype_check);
+
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
     {
       FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-        // CALLBACKS case needs a receiver to be passed into C++ callback.
+      if (must_preserve_receiver_reg) {
         __ Push(receiver, holder_reg, name_reg);
       } else {
         __ Push(holder_reg, name_reg);
@@ -1294,14 +1310,14 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
       __ bind(&interceptor_failed);
       __ pop(name_reg);
       __ pop(holder_reg);
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      if (must_preserve_receiver_reg) {
         __ pop(receiver);
       }
       // Leave the internal frame.
     }
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
-    if (*interceptor_holder != lookup->holder()) {
+    if (must_perform_prototype_check) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
                                    Handle<JSObject>(lookup->holder()),
@@ -1335,20 +1351,19 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
       if (!receiver.is(holder_reg)) {
         ASSERT(scratch1.is(holder_reg));
         __ Push(receiver, holder_reg);
-        __ ldr(scratch3,
-               FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
-        __ Push(scratch3, scratch2, name_reg);
       } else {
         __ push(receiver);
-        __ ldr(scratch3,
-               FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
-        __ Push(holder_reg, scratch3, scratch2, name_reg);
+        __ push(holder_reg);
       }
+      __ ldr(scratch3,
+             FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
+      __ mov(scratch1, Operand(ExternalReference::isolate_address()));
+      __ Push(scratch3, scratch1, scratch2, name_reg);
 
       ExternalReference ref =
           ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
                             masm()->isolate());
-      __ TailCallExternalReference(ref, 5, 1);
+      __ TailCallExternalReference(ref, 6, 1);
     }
   } else {  // !compile_followup_inline
     // Call the runtime system to load the interceptor.
@@ -1362,7 +1377,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
     ExternalReference ref =
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
                           masm()->isolate());
-    __ TailCallExternalReference(ref, 5, 1);
+    __ TailCallExternalReference(ref, 6, 1);
   }
 }
 
@@ -3368,6 +3383,44 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
 }
 
 
+static void GenerateSmiKeyCheck(MacroAssembler* masm,
+                                Register key,
+                                Register scratch0,
+                                Register scratch1,
+                                DwVfpRegister double_scratch0,
+                                Label* fail) {
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+    Label key_ok;
+    // Check for a smi, or for a heap number holding a smi value.  We
+    // convert the heap number and check that the conversion is exact and
+    // fits into the smi range.
+    __ JumpIfSmi(key, &key_ok);
+    __ CheckMap(key,
+                scratch0,
+                Heap::kHeapNumberMapRootIndex,
+                fail,
+                DONT_DO_SMI_CHECK);
+    __ sub(ip, key, Operand(kHeapObjectTag));
+    __ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
+    __ EmitVFPTruncate(kRoundToZero,
+                       double_scratch0.low(),
+                       double_scratch0,
+                       scratch0,
+                       scratch1,
+                       kCheckForInexactConversion);
+    __ b(ne, fail);
+    __ vmov(scratch0, double_scratch0.low());
+    __ TrySmiTag(scratch0, fail, scratch1);
+    __ mov(key, scratch0);
+    __ bind(&key_ok);
+  } else {
+    // Check that the key is a smi.
+    __ JumpIfNotSmi(key, fail);
+  }
+}
+
+
 void KeyedLoadStubCompiler::GenerateLoadExternalArray(
     MacroAssembler* masm,
     ElementsKind elements_kind) {
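
GenerateSmiKeyCheck now accepts a key that is either a smi or a heap number
whose value truncates exactly into the smi range. A C++ model of the
accept/reject rule; the 31-bit smi width below matches 32-bit V8 and is this
sketch's assumption:

    #include <cassert>
    #include <cstdint>

    static bool KeyToSmi(double key, int32_t* out) {
      const double kSmiMax = (1 << 30) - 1;
      const double kSmiMin = -(1 << 30);
      // The range test also rejects NaN, which fails every comparison.
      if (!(key >= kSmiMin && key <= kSmiMax)) return false;
      int32_t truncated = static_cast<int32_t>(key);
      if (static_cast<double>(truncated) != key) return false;  // inexact
      *out = truncated;
      return true;
    }

    int main() {
      int32_t smi;
      assert(KeyToSmi(7.0, &smi) && smi == 7);
      assert(!KeyToSmi(7.5, &smi));  // kCheckForInexactConversion analogue
      return 0;
    }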
@@ -3384,8 +3437,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic);
 
   __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
   // r3: elements array
@@ -3715,8 +3768,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, key, r4, r5, d1, &miss_force_generic);
 
   __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
 
@@ -4041,8 +4094,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(r0, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, r0, r4, r5, d1, &miss_force_generic);
 
   // Get the elements array.
   __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
@@ -4093,8 +4146,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
 
   // Get the elements array.
   __ ldr(elements_reg,
@@ -4169,8 +4222,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
 
   if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
     __ JumpIfNotSmi(value_reg, &transition_elements_kind);
@@ -4336,7 +4389,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
 
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
-  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, &miss_force_generic);
 
   __ ldr(elements_reg,
          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
@@ -4427,6 +4482,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
     // Increment the length of the array.
     __ mov(length_reg, Operand(Smi::FromInt(1)));
     __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ ldr(elements_reg,
+           FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
     __ jmp(&finish_store);
 
     __ bind(&check_capacity);
index daa75d5..a1cc5b6 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -465,15 +465,19 @@ function ArrayPush() {
 }
 
 
+// Returns an array containing the array elements of the object followed
+// by the array elements of each argument in order. See ECMA-262,
+// section 15.4.4.7.
 function ArrayConcat(arg1) {  // length == 1
   if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
     throw MakeTypeError("called_on_null_or_undefined",
                         ["Array.prototype.concat"]);
   }
 
+  var array = ToObject(this);
   var arg_count = %_ArgumentsLength();
   var arrays = new InternalArray(1 + arg_count);
-  arrays[0] = this;
+  arrays[0] = array;
   for (var i = 0; i < arg_count; i++) {
     arrays[i + 1] = %_Arguments(i);
   }
@@ -823,7 +827,8 @@ function ArraySort(comparefn) {
       var element = a[i];
       var order = %_CallFunction(receiver, element, pivot, comparefn);
       if (order < 0) {
-        %_SwapElements(a, i, low_end);
+        a[i] = a[low_end];
+        a[low_end] = element;
         low_end++;
       } else if (order > 0) {
         do {
@@ -832,9 +837,12 @@ function ArraySort(comparefn) {
           var top_elem = a[high_start];
           order = %_CallFunction(receiver, top_elem, pivot, comparefn);
         } while (order > 0);
-        %_SwapElements(a, i, high_start);
+        a[i] = a[high_start];
+        a[high_start] = element;
         if (order < 0) {
-          %_SwapElements(a, i, low_end);
+          element = a[i];
+          a[i] = a[low_end];
+          a[low_end] = element;
           low_end++;
         }
       }
@@ -1023,13 +1031,28 @@ function ArrayFilter(f, receiver) {
   var result = new $Array();
   var accumulator = new InternalArray();
   var accumulator_length = 0;
-  for (var i = 0; i < length; i++) {
-    if (i in array) {
-      var element = array[i];
-      if (%_CallFunction(receiver, element, i, array, f)) {
-        accumulator[accumulator_length++] = element;
+  if (%DebugCallbackSupportsStepping(f)) {
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        // Prepare break slots for debugger step in.
+        %DebugPrepareStepInIfStepping(f);
+        if (%_CallFunction(receiver, element, i, array, f)) {
+          accumulator[accumulator_length++] = element;
+        }
+      }
+    }
+  } else {
+    // This is a duplicate of the previous loop sans debug stepping.
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        if (%_CallFunction(receiver, element, i, array, f)) {
+          accumulator[accumulator_length++] = element;
+        }
       }
     }
+    // End of duplicate.
   }
   %MoveArrayContents(accumulator, result);
   return result;
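
Each rewritten builtin asks %DebugCallbackSupportsStepping once, then runs
one of two otherwise identical loops, so the per-iteration debugger hook
costs nothing when no debugger is attached. The same loop-unswitching
pattern in a standalone sketch, with PrepareForStepping as an illustrative
stand-in for %DebugPrepareStepInIfStepping:

    #include <cassert>
    #include <vector>

    static int g_prepare_calls = 0;
    static void PrepareForStepping() { g_prepare_calls++; }

    static long Sum(const std::vector<int>& xs, bool stepping) {
      long total = 0;
      if (stepping) {
        for (size_t i = 0; i < xs.size(); i++) {
          PrepareForStepping();  // per-iteration debugger hook
          total += xs[i];
        }
      } else {
        // Duplicate of the previous loop sans the debugger hook.
        for (size_t i = 0; i < xs.size(); i++) total += xs[i];
      }
      return total;
    }

    int main() {
      std::vector<int> xs;
      xs.push_back(1); xs.push_back(2); xs.push_back(3);
      assert(Sum(xs, false) == 6 && g_prepare_calls == 0);
      assert(Sum(xs, true) == 6 && g_prepare_calls == 3);
      return 0;
    }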
@@ -1055,12 +1078,24 @@ function ArrayForEach(f, receiver) {
   } else if (!IS_SPEC_OBJECT(receiver)) {
     receiver = ToObject(receiver);
   }
-
-  for (var i = 0; i < length; i++) {
-    if (i in array) {
-      var element = array[i];
-      %_CallFunction(receiver, element, i, array, f);
+  if (%DebugCallbackSupportsStepping(f)) {
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        // Prepare break slots for debugger step in.
+        %DebugPrepareStepInIfStepping(f);
+        %_CallFunction(receiver, element, i, array, f);
+      }
     }
+  } else {
+    // This is a duplicate of the previous loop sans debug stepping.
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        %_CallFunction(receiver, element, i, array, f);
+      }
+    }
+    // End of duplicate.
   }
 }
 
@@ -1087,11 +1122,24 @@ function ArraySome(f, receiver) {
     receiver = ToObject(receiver);
   }
 
-  for (var i = 0; i < length; i++) {
-    if (i in array) {
-      var element = array[i];
-      if (%_CallFunction(receiver, element, i, array, f)) return true;
+  if (%DebugCallbackSupportsStepping(f)) {
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        // Prepare break slots for debugger step in.
+        %DebugPrepareStepInIfStepping(f);
+        if (%_CallFunction(receiver, element, i, array, f)) return true;
+      }
+    }
+  } else {
+    // This is a duplicate of the previous loop sans debug stepping.
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        if (%_CallFunction(receiver, element, i, array, f)) return true;
+      }
     }
+    // End of duplicate.
   }
   return false;
 }
@@ -1117,11 +1165,24 @@ function ArrayEvery(f, receiver) {
     receiver = ToObject(receiver);
   }
 
-  for (var i = 0; i < length; i++) {
-    if (i in array) {
-      var element = array[i];
-      if (!%_CallFunction(receiver, element, i, array, f)) return false;
+  if (%DebugCallbackSupportsStepping(f)) {
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        // Prepare break slots for debugger step in.
+        %DebugPrepareStepInIfStepping(f);
+        if (!%_CallFunction(receiver, element, i, array, f)) return false;
+      }
+    }
+  } else {
+    // This is a duplicate of the previous loop sans debug stepping.
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        if (!%_CallFunction(receiver, element, i, array, f)) return false;
+      }
     }
+    // End of duplicate.
   }
   return true;
 }
@@ -1148,11 +1209,24 @@ function ArrayMap(f, receiver) {
 
   var result = new $Array();
   var accumulator = new InternalArray(length);
-  for (var i = 0; i < length; i++) {
-    if (i in array) {
-      var element = array[i];
-      accumulator[i] = %_CallFunction(receiver, element, i, array, f);
+  if (%DebugCallbackSupportsStepping(f)) {
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        // Prepare break slots for debugger step in.
+        %DebugPrepareStepInIfStepping(f);
+        accumulator[i] = %_CallFunction(receiver, element, i, array, f);
+      }
     }
+  } else {
+    // This is a duplicate of the previous loop sans debug stepping.
+    for (var i = 0; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        accumulator[i] = %_CallFunction(receiver, element, i, array, f);
+      }
+    }
+    // End of duplicate.
   }
   %MoveArrayContents(accumulator, result);
   return result;
@@ -1307,11 +1381,27 @@ function ArrayReduce(callback, current) {
   }
 
   var receiver = %GetDefaultReceiver(callback);
-  for (; i < length; i++) {
-    if (i in array) {
-      var element = array[i];
-      current = %_CallFunction(receiver, current, element, i, array, callback);
+
+  if (%DebugCallbackSupportsStepping(callback)) {
+    for (; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        // Prepare break slots for debugger step in.
+        %DebugPrepareStepInIfStepping(callback);
+        current =
+          %_CallFunction(receiver, current, element, i, array, callback);
+      }
+    }
+  } else {
+    // This is a duplicate of the previous loop sans debug stepping.
+    for (; i < length; i++) {
+      if (i in array) {
+        var element = array[i];
+        current =
+          %_CallFunction(receiver, current, element, i, array, callback);
+      }
     }
+    // End of duplicate.
   }
   return current;
 }
@@ -1344,11 +1434,27 @@ function ArrayReduceRight(callback, current) {
   }
 
   var receiver = %GetDefaultReceiver(callback);
-  for (; i >= 0; i--) {
-    if (i in array) {
-      var element = array[i];
-      current = %_CallFunction(receiver, current, element, i, array, callback);
+
+  if (%DebugCallbackSupportsStepping(callback)) {
+    for (; i >= 0; i--) {
+      if (i in array) {
+        var element = array[i];
+        // Prepare break slots for debugger step in.
+        %DebugPrepareStepInIfStepping(callback);
+        current =
+          %_CallFunction(receiver, current, element, i, array, callback);
+      }
+    }
+  } else {
+    // This is a duplicate of the previous loop sans debug stepping.
+    for (; i >= 0; i--) {
+      if (i in array) {
+        var element = array[i];
+        current =
+          %_CallFunction(receiver, current, element, i, array, callback);
+      }
     }
+    // End of duplicate.
   }
   return current;
 }
index 6deca26..05fe320 100644 (file)
@@ -62,6 +62,10 @@ class AssemblerBase: public Malloced {
   Isolate* isolate() const { return isolate_; }
   int jit_cookie() { return jit_cookie_; }
 
+  // Overwrite a host NaN with a quiet target NaN.  Used by mksnapshot for
+  // cross-snapshotting.
+  static void QuietNaN(HeapObject* nan) { }
+
  private:
   Isolate* isolate_;
   int jit_cookie_;
index 71bec1a..6f9fd7a 100644 (file)
@@ -171,11 +171,6 @@ LanguageMode FunctionLiteral::language_mode() const {
 }
 
 
-QmlModeFlag FunctionLiteral::qml_mode_flag() const {
-  return scope()->qml_mode_flag();
-}
-
-
 ObjectLiteral::Property::Property(Literal* key,
                                   Expression* value,
                                   Isolate* isolate) {
@@ -554,11 +549,6 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
   is_monomorphic_ = oracle->CallIsMonomorphic(this);
   Property* property = expression()->AsProperty();
   if (property == NULL) {
-    if (VariableProxy *proxy = expression()->AsVariableProxy()) {
-        if (proxy->var()->is_qml_global())
-            return;
-    }
-
     // Function call.  Specialize for monomorphic calls.
     if (is_monomorphic_) target_ = oracle->GetCallTarget(this);
   } else {
@@ -972,6 +962,14 @@ RegExpDisjunction::RegExpDisjunction(ZoneList<RegExpTree*>* alternatives)
 }
 
 
+static int IncreaseBy(int previous, int increase) {
+  if (RegExpTree::kInfinity - previous < increase) {
+    return RegExpTree::kInfinity;
+  } else {
+    return previous + increase;
+  }
+}
+
 RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
     : nodes_(nodes) {
   ASSERT(nodes->length() > 1);
@@ -979,13 +977,10 @@ RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
   max_match_ = 0;
   for (int i = 0; i < nodes->length(); i++) {
     RegExpTree* node = nodes->at(i);
-    min_match_ += node->min_match();
+    int node_min_match = node->min_match();
+    min_match_ = IncreaseBy(min_match_, node_min_match);
     int node_max_match = node->max_match();
-    if (kInfinity - max_match_ < node_max_match) {
-      max_match_ = kInfinity;
-    } else {
-      max_match_ += node->max_match();
-    }
+    max_match_ = IncreaseBy(max_match_, node_max_match);
   }
 }
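
min_match_ now saturates through the same helper max_match_ already used, so
pathological regexps clamp at kInfinity instead of overflowing a signed int.
The helper in isolation, assuming kInfinity is kMaxInt as in V8:

    #include <cassert>

    static const int kInfinity = 0x7fffffff;  // assumed: RegExpTree::kInfinity

    static int IncreaseBy(int previous, int increase) {
      // Saturating addition: adding past kInfinity sticks at kInfinity.
      if (kInfinity - previous < increase) return kInfinity;
      return previous + increase;
    }

    int main() {
      assert(IncreaseBy(3, 4) == 7);
      assert(IncreaseBy(kInfinity - 1, 5) == kInfinity);
      assert(IncreaseBy(kInfinity, kInfinity) == kInfinity);
      return 0;
    }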
 
@@ -1003,138 +998,78 @@ CaseClause::CaseClause(Isolate* isolate,
 }
 
 
-#define INCREASE_NODE_COUNT(NodeType) \
+#define REGULAR_NODE(NodeType) \
   void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
     increase_node_count(); \
   }
+#define DONT_OPTIMIZE_NODE(NodeType) \
+  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+    increase_node_count(); \
+    add_flag(kDontOptimize); \
+    add_flag(kDontInline); \
+    add_flag(kDontSelfOptimize); \
+  }
+#define DONT_INLINE_NODE(NodeType) \
+  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+    increase_node_count(); \
+    add_flag(kDontInline); \
+  }
+#define DONT_SELFOPTIMIZE_NODE(NodeType) \
+  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+    increase_node_count(); \
+    add_flag(kDontSelfOptimize); \
+  }
 
-INCREASE_NODE_COUNT(VariableDeclaration)
-INCREASE_NODE_COUNT(FunctionDeclaration)
-INCREASE_NODE_COUNT(ModuleDeclaration)
-INCREASE_NODE_COUNT(ImportDeclaration)
-INCREASE_NODE_COUNT(ExportDeclaration)
-INCREASE_NODE_COUNT(ModuleLiteral)
-INCREASE_NODE_COUNT(ModuleVariable)
-INCREASE_NODE_COUNT(ModulePath)
-INCREASE_NODE_COUNT(ModuleUrl)
-INCREASE_NODE_COUNT(Block)
-INCREASE_NODE_COUNT(ExpressionStatement)
-INCREASE_NODE_COUNT(EmptyStatement)
-INCREASE_NODE_COUNT(IfStatement)
-INCREASE_NODE_COUNT(ContinueStatement)
-INCREASE_NODE_COUNT(BreakStatement)
-INCREASE_NODE_COUNT(ReturnStatement)
-INCREASE_NODE_COUNT(Conditional)
-INCREASE_NODE_COUNT(Literal)
-INCREASE_NODE_COUNT(ObjectLiteral)
-INCREASE_NODE_COUNT(Assignment)
-INCREASE_NODE_COUNT(Throw)
-INCREASE_NODE_COUNT(Property)
-INCREASE_NODE_COUNT(UnaryOperation)
-INCREASE_NODE_COUNT(CountOperation)
-INCREASE_NODE_COUNT(BinaryOperation)
-INCREASE_NODE_COUNT(CompareOperation)
-INCREASE_NODE_COUNT(ThisFunction)
-INCREASE_NODE_COUNT(Call)
-INCREASE_NODE_COUNT(CallNew)
-
-#undef INCREASE_NODE_COUNT
-
-
-void AstConstructionVisitor::VisitWithStatement(WithStatement* node) {
-  increase_node_count();
-  add_flag(kDontOptimize);
-  add_flag(kDontInline);
-}
-
-
-void AstConstructionVisitor::VisitSwitchStatement(SwitchStatement* node) {
-  increase_node_count();
-  add_flag(kDontInline);
-}
-
-
-void AstConstructionVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
-  increase_node_count();
-  add_flag(kDontSelfOptimize);
-}
-
-
-void AstConstructionVisitor::VisitWhileStatement(WhileStatement* node) {
-  increase_node_count();
-  add_flag(kDontSelfOptimize);
-}
-
-
-void AstConstructionVisitor::VisitForStatement(ForStatement* node) {
-  increase_node_count();
-  add_flag(kDontSelfOptimize);
-}
-
-
-void AstConstructionVisitor::VisitForInStatement(ForInStatement* node) {
-  increase_node_count();
-  add_flag(kDontSelfOptimize);
-}
-
-
-void AstConstructionVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
-  increase_node_count();
-  add_flag(kDontOptimize);
-  add_flag(kDontInline);
-}
-
-
-void AstConstructionVisitor::VisitTryFinallyStatement(
-    TryFinallyStatement* node) {
-  increase_node_count();
-  add_flag(kDontOptimize);
-  add_flag(kDontInline);
-}
-
-
-void AstConstructionVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
-  increase_node_count();
-  add_flag(kDontOptimize);
-  add_flag(kDontInline);
-}
-
-
-void AstConstructionVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
-  increase_node_count();
-  add_flag(kDontInline);
-}
-
-
-void AstConstructionVisitor::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* node) {
-  increase_node_count();
-  add_flag(kDontOptimize);
-  add_flag(kDontInline);
-}
-
-
-void AstConstructionVisitor::VisitVariableProxy(VariableProxy* node) {
-  increase_node_count();
-  // In theory, we'd have to add:
-  // if(node->var()->IsLookupSlot()) { add_flag(kDontInline); }
-  // However, node->var() is usually not bound yet at VariableProxy creation
-  // time, and LOOKUP variables only result from constructs that cannot
-  // be inlined anyway.
-}
-
-
-void AstConstructionVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
-  increase_node_count();
-  add_flag(kDontInline);  // TODO(1322): Allow materialized literals.
-}
-
-
-void AstConstructionVisitor::VisitArrayLiteral(ArrayLiteral* node) {
-  increase_node_count();
-  add_flag(kDontInline);  // TODO(1322): Allow materialized literals.
-}
-
+REGULAR_NODE(VariableDeclaration)
+REGULAR_NODE(FunctionDeclaration)
+REGULAR_NODE(Block)
+REGULAR_NODE(ExpressionStatement)
+REGULAR_NODE(EmptyStatement)
+REGULAR_NODE(IfStatement)
+REGULAR_NODE(ContinueStatement)
+REGULAR_NODE(BreakStatement)
+REGULAR_NODE(ReturnStatement)
+REGULAR_NODE(SwitchStatement)
+REGULAR_NODE(Conditional)
+REGULAR_NODE(Literal)
+REGULAR_NODE(ObjectLiteral)
+REGULAR_NODE(Assignment)
+REGULAR_NODE(Throw)
+REGULAR_NODE(Property)
+REGULAR_NODE(UnaryOperation)
+REGULAR_NODE(CountOperation)
+REGULAR_NODE(BinaryOperation)
+REGULAR_NODE(CompareOperation)
+REGULAR_NODE(ThisFunction)
+REGULAR_NODE(Call)
+REGULAR_NODE(CallNew)
+// In theory, for VariableProxy we'd have to add:
+// if (node->var()->IsLookupSlot()) add_flag(kDontInline);
+// But node->var() is usually not bound yet at VariableProxy creation time, and
+// LOOKUP variables only result from constructs that cannot be inlined anyway.
+REGULAR_NODE(VariableProxy)
+
+DONT_OPTIMIZE_NODE(ModuleDeclaration)
+DONT_OPTIMIZE_NODE(ImportDeclaration)
+DONT_OPTIMIZE_NODE(ExportDeclaration)
+DONT_OPTIMIZE_NODE(ModuleLiteral)
+DONT_OPTIMIZE_NODE(ModuleVariable)
+DONT_OPTIMIZE_NODE(ModulePath)
+DONT_OPTIMIZE_NODE(ModuleUrl)
+DONT_OPTIMIZE_NODE(WithStatement)
+DONT_OPTIMIZE_NODE(TryCatchStatement)
+DONT_OPTIMIZE_NODE(TryFinallyStatement)
+DONT_OPTIMIZE_NODE(DebuggerStatement)
+DONT_OPTIMIZE_NODE(SharedFunctionInfoLiteral)
+
+DONT_INLINE_NODE(FunctionLiteral)
+DONT_INLINE_NODE(RegExpLiteral)  // TODO(1322): Allow materialized literals.
+DONT_INLINE_NODE(ArrayLiteral)  // TODO(1322): Allow materialized literals.
+
+DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
+DONT_SELFOPTIMIZE_NODE(WhileStatement)
+DONT_SELFOPTIMIZE_NODE(ForStatement)
+DONT_SELFOPTIMIZE_NODE(ForInStatement)
 
 void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
   increase_node_count();
@@ -1152,6 +1087,11 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
   }
 }
 
+#undef REGULAR_NODE
+#undef DONT_OPTIMIZE_NODE
+#undef DONT_INLINE_NODE
+#undef DONT_SELFOPTIMIZE_NODE
+
 
 Handle<String> Literal::ToString() {
   if (handle_->IsString()) return Handle<String>::cast(handle_);
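
The REGULAR_NODE / DONT_*_NODE table above replaces a page of hand-written
visitor methods: each invocation stamps out a Visit method that bumps the
node count and sets the listed flags. The pattern in miniature, with made-up
node types:

    #include <cassert>

    struct IfNode {};
    struct WithNode {};

    struct Visitor {
      Visitor() : node_count(0), dont_optimize(false) {}
      int node_count;
      bool dont_optimize;

    #define REGULAR_NODE(NodeType) \
      void Visit(NodeType*) { node_count++; }
    #define DONT_OPTIMIZE_NODE(NodeType) \
      void Visit(NodeType*) { node_count++; dont_optimize = true; }

      REGULAR_NODE(IfNode)
      DONT_OPTIMIZE_NODE(WithNode)

    #undef REGULAR_NODE
    #undef DONT_OPTIMIZE_NODE
    };

    int main() {
      Visitor v;
      IfNode if_node;
      WithNode with_node;
      v.Visit(&if_node);
      assert(v.node_count == 1 && !v.dont_optimize);
      v.Visit(&with_node);
      assert(v.node_count == 2 && v.dont_optimize);
      return 0;
    }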
index 6b85b7c..dad8057 100644 (file)
@@ -421,8 +421,8 @@ class Block: public BreakableStatement {
   ZoneList<Statement*>* statements() { return &statements_; }
   bool is_initializer_block() const { return is_initializer_block_; }
 
-  Scope* block_scope() const { return block_scope_; }
-  void set_block_scope(Scope* block_scope) { block_scope_ = block_scope; }
+  Scope* scope() const { return scope_; }
+  void set_scope(Scope* scope) { scope_ = scope; }
 
  protected:
   template<class> friend class AstNodeFactory;
@@ -434,13 +434,13 @@ class Block: public BreakableStatement {
       : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
         statements_(capacity),
         is_initializer_block_(is_initializer_block),
-        block_scope_(NULL) {
+        scope_(NULL) {
   }
 
  private:
   ZoneList<Statement*> statements_;
   bool is_initializer_block_;
-  Scope* block_scope_;
+  Scope* scope_;
 };
 
 
@@ -608,6 +608,7 @@ class ModuleLiteral: public Module {
   DECLARE_NODE_TYPE(ModuleLiteral)
 
   Block* body() const { return body_; }
+  Handle<Context> context() const { return context_; }
 
  protected:
   template<class> friend class AstNodeFactory;
@@ -619,6 +620,7 @@ class ModuleLiteral: public Module {
 
  private:
   Block* body_;
+  Handle<Context> context_;
 };
 
 
@@ -2045,8 +2047,6 @@ class FunctionLiteral: public Expression {
   bool is_anonymous() const { return IsAnonymous::decode(bitfield_); }
   bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
   LanguageMode language_mode() const;
-  bool qml_mode() const { return qml_mode_flag() == kQmlMode; }
-  QmlModeFlag qml_mode_flag() const;
 
   int materialized_literal_count() { return materialized_literal_count_; }
   int expected_property_count() { return expected_property_count_; }
index 55de87c..e2057ed 100644 (file)
@@ -157,16 +157,7 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
   (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
 #include "atomicops_internals_x86_gcc.h"
 #elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM)
-  // We need special handling for QNX as the existing code in
-  // atomicops_internals_arm_gcc.h is actually Linux-specific. This is due to
-  // it using a magic hard-wired function address for LinuxKernelCmpxchgFunc.
-  // The QNX implementation uses the equivalent system call for that platform
-  // but is not source compatible.
-  #if defined(__QNXNTO__)
-    #include "atomicops_internals_arm_qnx.h"
-  #else
-    #include "atomicops_internals_arm_gcc.h"
-  #endif
+#include "atomicops_internals_arm_gcc.h"
 #elif defined(__GNUC__) && defined(V8_HOST_ARCH_MIPS)
 #include "atomicops_internals_mips_gcc.h"
 #else
diff --git a/src/3rdparty/v8/src/atomicops_internals_arm_qnx.h b/src/3rdparty/v8/src/atomicops_internals_arm_qnx.h
deleted file mode 100644 (file)
index 39c9850..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2012 Research in Motion. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_ATOMICOPS_INTERNALS_ARM_QNX_H_
-#define V8_ATOMICOPS_INTERNALS_ARM_QNX_H_
-
-#include <arm/cpuinline.h>
-#include <arm/smpxchg.h>
-
-namespace v8 {
-namespace internal {
-
-inline void MemoryBarrier() {
-  __cpu_membarrier();
-}
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
-                                         Atomic32 old_value,
-                                         Atomic32 new_value) {
-  return _smp_cmpxchg(reinterpret_cast<volatile unsigned*>(ptr), old_value, new_value);
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
-                                         Atomic32 new_value) {
-  return _smp_xchg(reinterpret_cast<volatile unsigned*>(ptr), new_value);
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
-                                          Atomic32 increment) {
-  for (;;) {
-    // Atomic exchange the old value with an incremented one.
-    Atomic32 old_value = *ptr;
-    Atomic32 new_value = old_value + increment;
-    if (_smp_cmpxchg(reinterpret_cast<volatile unsigned*>(ptr), old_value, new_value)) {
-      // The exchange took place as expected.
-      return new_value;
-    }
-    // Otherwise, *ptr changed mid-loop and we need to retry.
-  }
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
-                                        Atomic32 increment) {
-  MemoryBarrier();
-  return NoBarrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  MemoryBarrier();
-  *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
-  return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-  Atomic32 value = *ptr;
-  MemoryBarrier();
-  return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_ATOMICOPS_INTERNALS_ARM_QNX_H_
-
index 26451ff..048a027 100644 (file)
@@ -484,8 +484,8 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
 
     global_context()->set_initial_object_prototype(*prototype);
     SetPrototype(object_fun, prototype);
-    object_function_map->
-      set_instance_descriptors(heap->empty_descriptor_array());
+    object_function_map->set_instance_descriptors(
+        heap->empty_descriptor_array());
   }
 
   // Allocate the empty function as the prototype for function ECMAScript
@@ -516,12 +516,10 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
   function_instance_map_writable_prototype_->set_prototype(*empty_function);
 
   // Allocate the function map first and then patch the prototype later
-  Handle<Map> empty_fm = factory->CopyMapDropDescriptors(
-      function_without_prototype_map);
-  empty_fm->set_instance_descriptors(
-      function_without_prototype_map->instance_descriptors());
-  empty_fm->set_prototype(global_context()->object_function()->prototype());
-  empty_function->set_map(*empty_fm);
+  Handle<Map> empty_function_map = CreateFunctionMap(DONT_ADD_PROTOTYPE);
+  empty_function_map->set_prototype(
+      global_context()->object_function()->prototype());
+  empty_function->set_map(*empty_function_map);
   return empty_function;
 }
 
@@ -811,7 +809,6 @@ void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
   Handle<JSBuiltinsObject> builtins_global(global_context_->builtins());
   global_context_->set_extension(*inner_global);
   global_context_->set_global(*inner_global);
-  global_context_->set_qml_global(*inner_global);
   global_context_->set_security_token(*inner_global);
   static const PropertyAttributes attributes =
       static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
@@ -837,7 +834,6 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
   // Set extension and global object.
   global_context()->set_extension(*inner_global);
   global_context()->set_global(*inner_global);
-  global_context()->set_qml_global(*inner_global);
   // Security setup: Set the security token of the global object to
   // its inner global. This makes the security check between two
   // different contexts fail by default even in case of global
@@ -1013,7 +1009,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
     proto_map->set_prototype(global_context()->initial_object_prototype());
     Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
     proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
-                                 heap->empty_string());
+                                 heap->query_colon_symbol());
     proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
                                  heap->false_value());
     proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
@@ -2161,7 +2157,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
     Handle<DescriptorArray> descs =
         Handle<DescriptorArray>(from->map()->instance_descriptors());
     for (int i = 0; i < descs->number_of_descriptors(); i++) {
-      PropertyDetails details = PropertyDetails(descs->GetDetails(i));
+      PropertyDetails details = descs->GetDetails(i);
       switch (details.type()) {
         case FIELD: {
           HandleScope inner;
index 0f493e6..84a0c3d 100644 (file)
@@ -412,12 +412,17 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
   HeapObject* elms = array->elements();
   Map* map = elms->map();
   if (map == heap->fixed_array_map()) {
-    if (args == NULL || !array->HasFastSmiOnlyElements()) {
+    if (args == NULL || array->HasFastElements()) return elms;
+    if (array->HasFastDoubleElements()) {
+      ASSERT(elms == heap->empty_fixed_array());
+      MaybeObject* maybe_transition =
+          array->TransitionElementsKind(FAST_ELEMENTS);
+      if (maybe_transition->IsFailure()) return maybe_transition;
       return elms;
     }
   } else if (map == heap->fixed_cow_array_map()) {
     MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
-    if (args == NULL || !array->HasFastSmiOnlyElements() ||
+    if (args == NULL || array->HasFastElements() ||
         maybe_writable_result->IsFailure()) {
       return maybe_writable_result;
     }
@@ -1098,7 +1103,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper(
 
     CustomArguments custom(isolate);
     v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
-        data_obj, *function, raw_holder);
+        isolate, data_obj, *function, raw_holder);
 
     v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
         custom.end(),
@@ -1138,68 +1143,6 @@ BUILTIN(HandleApiCallConstruct) {
 }
 
 
-#ifdef DEBUG
-
-static void VerifyTypeCheck(Handle<JSObject> object,
-                            Handle<JSFunction> function) {
-  ASSERT(function->shared()->IsApiFunction());
-  FunctionTemplateInfo* info = function->shared()->get_api_func_data();
-  if (info->signature()->IsUndefined()) return;
-  SignatureInfo* signature = SignatureInfo::cast(info->signature());
-  Object* receiver_type = signature->receiver();
-  if (receiver_type->IsUndefined()) return;
-  FunctionTemplateInfo* type = FunctionTemplateInfo::cast(receiver_type);
-  ASSERT(object->IsInstanceOf(type));
-}
-
-#endif
-
-
-BUILTIN(FastHandleApiCall) {
-  ASSERT(!CalledAsConstructor(isolate));
-  Heap* heap = isolate->heap();
-  const bool is_construct = false;
-
-  // We expect four more arguments: callback, function, call data, and holder.
-  const int args_length = args.length() - 4;
-  ASSERT(args_length >= 0);
-
-  Object* callback_obj = args[args_length];
-
-  v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
-      &args[args_length + 1],
-      &args[0] - 1,
-      args_length - 1,
-      is_construct);
-
-#ifdef DEBUG
-  VerifyTypeCheck(Utils::OpenHandle(*new_args.Holder()),
-                  Utils::OpenHandle(*new_args.Callee()));
-#endif
-  HandleScope scope(isolate);
-  Object* result;
-  v8::Handle<v8::Value> value;
-  {
-    // Leaving JavaScript.
-    VMState state(isolate, EXTERNAL);
-    ExternalCallbackScope call_scope(isolate,
-                                     v8::ToCData<Address>(callback_obj));
-    v8::InvocationCallback callback =
-        v8::ToCData<v8::InvocationCallback>(callback_obj);
-
-    value = callback(new_args);
-  }
-  if (value.IsEmpty()) {
-    result = heap->undefined_value();
-  } else {
-    result = *reinterpret_cast<Object**>(*value);
-  }
-
-  RETURN_IF_SCHEDULED_EXCEPTION(isolate);
-  return result;
-}
-
-
 // Helper function to handle calls to non-function objects created through the
 // API. The object can be called as either a constructor (using new) or just as
 // a function (without new).
@@ -1238,7 +1181,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor(
 
     CustomArguments custom(isolate);
     v8::ImplementationUtilities::PrepareArgumentsData(custom.end(),
-        call_data->data(), constructor, obj);
+        isolate, call_data->data(), constructor, obj);
     v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
         custom.end(),
         &args[0] - 1,
index f079139..3ea3393 100644 (file)
@@ -56,7 +56,6 @@ enum BuiltinExtraArguments {
   V(ArrayConcat, NO_EXTRA_ARGUMENTS)                                \
                                                                     \
   V(HandleApiCall, NEEDS_CALLED_FUNCTION)                           \
-  V(FastHandleApiCall, NO_EXTRA_ARGUMENTS)                          \
   V(HandleApiCallConstruct, NEEDS_CALLED_FUNCTION)                  \
   V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS)                    \
   V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS)                 \
index 11016c8..814e358 100644 (file)
@@ -73,21 +73,12 @@ SmartArrayPointer<const char> CodeStub::GetName() {
 
 
 void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
-  code->set_major_key(MajorKey());
-
   Isolate* isolate = masm->isolate();
   SmartArrayPointer<const char> name = GetName();
   PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
   GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
   Counters* counters = isolate->counters();
   counters->total_stubs_code_size()->Increment(code->instruction_size());
-
-#ifdef ENABLE_DISASSEMBLER
-  if (FLAG_print_code_stubs) {
-    code->Disassemble(*name);
-    PrintF("\n");
-  }
-#endif
 }
 
 
@@ -125,8 +116,16 @@ Handle<Code> CodeStub::GetCode() {
         GetICState());
     Handle<Code> new_object = factory->NewCode(
         desc, flags, masm.CodeObject(), NeedsImmovableCode());
-    RecordCodeGeneration(*new_object, &masm);
+    new_object->set_major_key(MajorKey());
     FinishCode(new_object);
+    RecordCodeGeneration(*new_object, &masm);
+
+#ifdef ENABLE_DISASSEMBLER
+    if (FLAG_print_code_stubs) {
+      new_object->Disassemble(*GetName());
+      PrintF("\n");
+    }
+#endif
 
     if (UseSpecialCache()) {
       AddToSpecialCache(new_object);
index 4b6ceb2..5c87178 100644 (file)
@@ -343,7 +343,7 @@ class FastNewContextStub : public CodeStub {
   static const int kMaximumSlots = 64;
 
   explicit FastNewContextStub(int slots) : slots_(slots) {
-    ASSERT(slots_ >= 0 && slots_ <= kMaximumSlots);
+    ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
   }
 
   void Generate(MacroAssembler* masm);
@@ -361,7 +361,7 @@ class FastNewBlockContextStub : public CodeStub {
   static const int kMaximumSlots = 64;
 
   explicit FastNewBlockContextStub(int slots) : slots_(slots) {
-    ASSERT(slots_ >= 0 && slots_ <= kMaximumSlots);
+    ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
   }
 
   void Generate(MacroAssembler* masm);
@@ -498,6 +498,7 @@ class ICCompareStub: public CodeStub {
 
   virtual void FinishCode(Handle<Code> code) {
     code->set_compare_state(state_);
+    code->set_compare_operation(op_);
   }
 
   virtual CodeStub::Major MajorKey() { return CompareIC; }
index 3b9c59e..b73e8ac 100644 (file)
@@ -40,6 +40,9 @@ class CompilerIntrinsics {
   // Returns number of zero bits following most significant 1 bit.
   // Undefined for zero value.
   INLINE(static int CountLeadingZeros(uint32_t value));
+
+  // Returns the number of bits set.
+  INLINE(static int CountSetBits(uint32_t value));
 };
 
 #ifdef __GNUC__
@@ -51,6 +54,10 @@ int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
   return __builtin_clz(value);
 }
 
+int CompilerIntrinsics::CountSetBits(uint32_t value) {
+  return __builtin_popcount(value);
+}
+
 #elif defined(_MSC_VER)
 
 #pragma intrinsic(_BitScanForward)
@@ -68,6 +75,16 @@ int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
   return 31 - static_cast<int>(result);
 }
 
+int CompilerIntrinsics::CountSetBits(uint32_t value) {
+  // Manually count set bits.
+  value = ((value >>  1) & 0x55555555) + (value & 0x55555555);
+  value = ((value >>  2) & 0x33333333) + (value & 0x33333333);
+  value = ((value >>  4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
+  value = ((value >>  8) & 0x00ff00ff) + (value & 0x00ff00ff);
+  value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
+  return value;
+}
+
 #else
 #error Unsupported compiler
 #endif
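
The MSVC fallback above is the classic SWAR population count: each step adds adjacent 1-, 2-, 4-, 8- and then 16-bit partial counts in parallel, so the whole word is reduced in five masked adds. A standalone sketch of the same reduction (not V8 code), with a few sanity checks:

    #include <cassert>
    #include <stdint.h>

    // After step k, every 2^k-bit field of v holds the number of bits
    // that were set inside that field of the original value.
    static int CountSetBits(uint32_t v) {
      v = ((v >>  1) & 0x55555555) + (v & 0x55555555);  // pairs
      v = ((v >>  2) & 0x33333333) + (v & 0x33333333);  // nibbles
      v = ((v >>  4) & 0x0f0f0f0f) + (v & 0x0f0f0f0f);  // bytes
      v = ((v >>  8) & 0x00ff00ff) + (v & 0x00ff00ff);  // half words
      v = ((v >> 16) & 0x0000ffff) + (v & 0x0000ffff);  // whole word
      return static_cast<int>(v);
    }

    int main() {
      assert(CountSetBits(0x00000000u) ==  0);
      assert(CountSetBits(0x80000001u) ==  2);
      assert(CountSetBits(0xffffffffu) == 32);
      return 0;
    }
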
index 6435068..ecac5cb 100644
@@ -118,7 +118,7 @@ bool CompilationInfo::ShouldSelfOptimize() {
       FLAG_crankshaft &&
       !function()->flags()->Contains(kDontSelfOptimize) &&
       !function()->flags()->Contains(kDontOptimize) &&
-      function()->scope()->allows_lazy_recompilation() &&
+      function()->scope()->AllowsLazyRecompilation() &&
       (shared_info().is_null() || !shared_info()->optimization_disabled());
 }
 
@@ -475,8 +475,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
                                              v8::Extension* extension,
                                              ScriptDataImpl* pre_data,
                                              Handle<Object> script_data,
-                                             NativesFlag natives,
-                                             v8::Script::CompileFlags compile_flags) {
+                                             NativesFlag natives) {
   Isolate* isolate = source->GetIsolate();
   int source_length = source->length();
   isolate->counters()->total_load_size()->Increment(source_length);
@@ -508,7 +507,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
 
     // Create a script object describing the script to be compiled.
     Handle<Script> script = FACTORY->NewScript(source);
-    if (natives == NATIVES_CODE || compile_flags & v8::Script::NativeMode) {
+    if (natives == NATIVES_CODE) {
       script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
     }
     if (!script_name.is_null()) {
@@ -528,7 +527,6 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
     if (FLAG_use_strict) {
       info.SetLanguageMode(FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE);
     }
-    if (compile_flags & v8::Script::QmlMode) info.MarkAsQmlMode();
     result = MakeFunctionInfo(&info);
     if (extension == NULL && !result.is_null()) {
       compilation_cache->PutScript(source, result);
@@ -548,8 +546,7 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
                                                  Handle<Context> context,
                                                  bool is_global,
                                                  LanguageMode language_mode,
-                                                 int scope_position,
-                                                 bool qml_mode) {
+                                                 int scope_position) {
   Isolate* isolate = source->GetIsolate();
   int source_length = source->length();
   isolate->counters()->total_eval_size()->Increment(source_length);
@@ -575,7 +572,6 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
     info.MarkAsEval();
     if (is_global) info.MarkAsGlobal();
     info.SetLanguageMode(language_mode);
-    if (qml_mode) info.MarkAsQmlMode();
     info.SetCallingContext(context);
     result = MakeFunctionInfo(&info);
     if (!result.is_null()) {
@@ -630,12 +626,6 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
     info->SetLanguageMode(language_mode);
     shared->set_language_mode(language_mode);
 
-    // After parsing we know function's qml mode. Remember it.
-    if (info->function()->qml_mode()) {
-      shared->set_qml_mode(true);
-      info->MarkAsQmlMode();
-    }
-
     // Compile the code.
     if (!MakeCode(info)) {
       if (!isolate->has_pending_exception()) {
@@ -785,7 +775,6 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
       *lit->this_property_assignments());
   function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
   function_info->set_language_mode(lit->language_mode());
-  function_info->set_qml_mode(lit->qml_mode());
   function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
   function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
   function_info->set_ast_node_count(lit->ast_node_count());
index 09583c0..44df9e0 100644
@@ -58,7 +58,6 @@ class CompilationInfo BASE_EMBEDDED {
     return LanguageModeField::decode(flags_);
   }
   bool is_in_loop() const { return IsInLoop::decode(flags_); }
-  bool is_qml_mode() const { return IsQmlMode::decode(flags_); }
   FunctionLiteral* function() const { return function_; }
   Scope* scope() const { return scope_; }
   Scope* global_scope() const { return global_scope_; }
@@ -89,9 +88,6 @@ class CompilationInfo BASE_EMBEDDED {
     ASSERT(is_lazy());
     flags_ |= IsInLoop::encode(true);
   }
-  void MarkAsQmlMode() {
-    flags_ |= IsQmlMode::encode(true);
-  }
   void MarkAsNative() {
     flags_ |= IsNative::encode(true);
   }
@@ -200,9 +196,6 @@ class CompilationInfo BASE_EMBEDDED {
       ASSERT(language_mode() == CLASSIC_MODE);
       SetLanguageMode(shared_info_->language_mode());
     }
-    if (!shared_info_.is_null() && shared_info_->qml_mode()) {
-      MarkAsQmlMode();
-    }
   }
 
   void SetMode(Mode mode) {
@@ -229,8 +222,7 @@ class CompilationInfo BASE_EMBEDDED {
   // If compiling for debugging, produce just full code matching the
   // initial mode setting.
   class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
-  // Qml mode
-  class IsQmlMode: public BitField<bool, 9, 1> {};
+
 
   unsigned flags_;
 
@@ -300,16 +292,14 @@ class Compiler : public AllStatic {
                                             v8::Extension* extension,
                                             ScriptDataImpl* pre_data,
                                             Handle<Object> script_data,
-                                            NativesFlag is_natives_code,
-                                            v8::Script::CompileFlags = v8::Script::Default);
+                                            NativesFlag is_natives_code);
 
   // Compile a String source within a context for Eval.
   static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
                                                 Handle<Context> context,
                                                 bool is_global,
                                                 LanguageMode language_mode,
-                                                int scope_position,
-                                                bool qml_mode);
+                                                int scope_position);
 
   // Compile from function info (used for lazy compilation). Returns true on
   // success and false if the compilation resulted in a stack overflow.
index cf07cc6..76784bd 100644
@@ -103,9 +103,6 @@ Handle<Object> Context::Lookup(Handle<String> name,
     PrintF(")\n");
   }
 
-  Handle<JSObject> qml_global;
-  Handle<JSObject> qml_global_global;
-
   do {
     if (FLAG_trace_contexts) {
       PrintF(" - looking in context %p", reinterpret_cast<void*>(*context));
@@ -113,11 +110,6 @@ Handle<Object> Context::Lookup(Handle<String> name,
       PrintF("\n");
     }
 
-    if (qml_global.is_null() && !context->qml_global()->IsUndefined()) {
-      qml_global = Handle<JSObject>(context->qml_global(), isolate);
-      qml_global_global = Handle<JSObject>(context->global(), isolate);
-    }
-
     // 1. Check global objects, subjects of with, and extension objects.
     if (context->IsGlobalContext() ||
         context->IsWithContext() ||
@@ -241,33 +233,6 @@ Handle<Object> Context::Lookup(Handle<String> name,
     }
   } while (follow_context_chain);
 
-  if (!qml_global.is_null()) {
-    if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0) {
-      *attributes = qml_global_global->GetLocalPropertyAttribute(*name);
-    } else {
-      *attributes = qml_global_global->GetPropertyAttribute(*name);
-    }
-
-    if (*attributes != ABSENT) {
-      *attributes = ABSENT;
-    } else {
-      if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0) {
-        *attributes = qml_global->GetLocalPropertyAttribute(*name);
-      } else {
-        *attributes = qml_global->GetPropertyAttribute(*name);
-      }
-
-      if (*attributes != ABSENT) {
-        // property found
-        if (FLAG_trace_contexts) {
-          PrintF("=> found property in qml global object %p\n",
-                 reinterpret_cast<void*>(*qml_global));
-        }
-        return qml_global;
-      }
-    }
-  }
-
   if (FLAG_trace_contexts) {
     PrintF("=> no property/slot found\n");
   }
index 326e6db..647c15c 100644
@@ -221,7 +221,6 @@ class Context: public FixedArray {
     // (with contexts), or the variable name (catch contexts), the serialized
     // scope info (block contexts).
     EXTENSION_INDEX,
-    QML_GLOBAL_INDEX,
     GLOBAL_INDEX,
     MIN_CONTEXT_SLOTS,
 
@@ -328,9 +327,6 @@ class Context: public FixedArray {
   }
   void set_global(GlobalObject* global) { set(GLOBAL_INDEX, global); }
 
-  JSObject *qml_global() { return reinterpret_cast<JSObject *>(get(QML_GLOBAL_INDEX)); }
-  void set_qml_global(JSObject *qml_global) { set(QML_GLOBAL_INDEX, qml_global); }
-
   // Returns a JSGlobalProxy object or null.
   JSObject* global_proxy();
   void set_global_proxy(JSObject* global);
@@ -401,7 +397,7 @@ class Context: public FixedArray {
   GLOBAL_CONTEXT_FIELDS(GLOBAL_CONTEXT_FIELD_ACCESSORS)
 #undef GLOBAL_CONTEXT_FIELD_ACCESSORS
 
-  // Lookup the the slot called name, starting with the current context.
+  // Lookup the slot called name, starting with the current context.
   // There are three possibilities:
   //
   // 1) result->IsContext():
index 1e8b4c8..ddd4100 100644
@@ -26,7 +26,8 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 
-#ifdef USING_V8_SHARED  // Defined when linking against shared lib on Windows.
+// Defined when linking against shared lib on Windows.
+#if defined(USING_V8_SHARED) && !defined(V8_SHARED)
 #define V8_SHARED
 #endif
 
@@ -315,9 +316,10 @@ static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
 }
 
 
-const char kArrayBufferReferencePropName[] = "_is_array_buffer_";
-const char kArrayBufferMarkerPropName[] = "_array_buffer_ref_";
+const char kArrayBufferMarkerPropName[] = "_is_array_buffer_";
+const char kArrayBufferReferencePropName[] = "_array_buffer_ref_";
 
+static const int kExternalArrayAllocationHeaderSize = 2;
 
 Handle<Value> Shell::CreateExternalArray(const Arguments& args,
                                          ExternalArrayType type,
@@ -352,10 +354,11 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
 
   Local<Value> length_value = (args.Length() < 3)
       ? (first_arg_is_array_buffer
-         ? args[0]->ToObject()->Get(String::New("length"))
+         ? args[0]->ToObject()->Get(String::New("byteLength"))
          : args[0])
       : args[2];
-  size_t length = convertToUint(length_value, &try_catch);
+  size_t byteLength = convertToUint(length_value, &try_catch);
+  size_t length = byteLength;
   if (try_catch.HasCaught()) return try_catch.Exception();
 
   void* data = NULL;
@@ -367,7 +370,7 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
     data = derived_from->GetIndexedPropertiesExternalArrayData();
 
     size_t array_buffer_length = convertToUint(
-        derived_from->Get(String::New("length")),
+        derived_from->Get(String::New("byteLength")),
         &try_catch);
     if (try_catch.HasCaught()) return try_catch.Exception();
 
@@ -433,13 +436,14 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
       return ThrowException(String::New("Array exceeds maximum size (2G)"));
     }
     // Prepend the size of the allocated chunk to the data itself.
-    int total_size = length * element_size + sizeof(size_t);
+    int total_size = length * element_size +
+        kExternalArrayAllocationHeaderSize * sizeof(size_t);
     data = malloc(total_size);
     if (data == NULL) {
       return ThrowException(String::New("Memory allocation failed."));
     }
     *reinterpret_cast<size_t*>(data) = total_size;
-    data = reinterpret_cast<size_t*>(data) + 1;
+    data = reinterpret_cast<size_t*>(data) + kExternalArrayAllocationHeaderSize;
     memset(data, 0, length * element_size);
     V8::AdjustAmountOfExternalAllocatedMemory(total_size);
   }
@@ -449,10 +453,20 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
   array->SetIndexedPropertiesToExternalArrayData(
       reinterpret_cast<uint8_t*>(data) + offset, type,
       static_cast<int>(length));
-  array->Set(String::New("length"),
-             Int32::New(static_cast<int32_t>(length)), ReadOnly);
-  array->Set(String::New("BYTES_PER_ELEMENT"),
-             Int32::New(static_cast<int32_t>(element_size)));
+  array->Set(String::New("byteLength"),
+             Int32::New(static_cast<int32_t>(byteLength)), ReadOnly);
+  if (!is_array_buffer_construct) {
+    array->Set(String::New("length"),
+               Int32::New(static_cast<int32_t>(length)), ReadOnly);
+    array->Set(String::New("byteOffset"),
+               Int32::New(static_cast<int32_t>(offset)), ReadOnly);
+    array->Set(String::New("BYTES_PER_ELEMENT"),
+               Int32::New(static_cast<int32_t>(element_size)));
+    // We only support the 'buffer' property if constructed from a buffer.
+    if (first_arg_is_array_buffer) {
+      array->Set(String::New("buffer"), args[0], ReadOnly);
+    }
+  }
   return array;
 }
 
@@ -463,7 +477,7 @@ void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
   Handle<Object> converted_object = object->ToObject();
   Local<Value> prop_value = converted_object->Get(prop_name);
   if (data != NULL && !prop_value->IsObject()) {
-    data = reinterpret_cast<size_t*>(data) - 1;
+    data = reinterpret_cast<size_t*>(data) - kExternalArrayAllocationHeaderSize;
     V8::AdjustAmountOfExternalAllocatedMemory(
         -static_cast<int>(*reinterpret_cast<size_t*>(data)));
     free(data);
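
The allocation path and the weak callback above must agree on the header layout: each malloc'd chunk stores its total size in front of the element data so the callback can credit the bytes back via AdjustAmountOfExternalAllocatedMemory before freeing. The patch does not say why the header grew from one size_t to two; a plausible reason (an assumption, not source) is keeping the payload 8-byte aligned for Float64 data on 32-bit builds. A self-contained sketch of the two sides:

    #include <cstdlib>
    #include <cstring>

    // Layout, assuming a 2-word header as in the patch:
    //   [ total_size : size_t ][ pad : size_t ][ element data ... ]
    //   ^ malloc() result                      ^ pointer handed to V8
    static const int kHeaderWords = 2;

    void* AllocatePayload(size_t length, size_t element_size) {
      size_t total = length * element_size + kHeaderWords * sizeof(size_t);
      size_t* chunk = static_cast<size_t*>(malloc(total));
      if (chunk == NULL) return NULL;
      chunk[0] = total;                      // remember the chunk size
      void* payload = chunk + kHeaderWords;  // element data starts here
      memset(payload, 0, length * element_size);
      return payload;
    }

    void FreePayload(void* payload) {
      size_t* chunk = static_cast<size_t*>(payload) - kHeaderWords;
      size_t total = chunk[0];  // what the external-memory accounting owes
      (void) total;             // d8 would report -total back to V8 here
      free(chunk);
    }
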
@@ -822,6 +836,8 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
   global_template->Set(String::New("read"), FunctionTemplate::New(Read));
   global_template->Set(String::New("readbinary"),
                        FunctionTemplate::New(ReadBinary));
+  global_template->Set(String::New("readbuffer"),
+                       FunctionTemplate::New(ReadBuffer));
   global_template->Set(String::New("readline"),
                        FunctionTemplate::New(ReadLine));
   global_template->Set(String::New("load"), FunctionTemplate::New(Load));
@@ -1057,6 +1073,32 @@ Handle<Value> Shell::ReadBinary(const Arguments& args) {
 }
 
 
+Handle<Value> Shell::ReadBuffer(const Arguments& args) {
+  String::Utf8Value filename(args[0]);
+  int length;
+  if (*filename == NULL) {
+    return ThrowException(String::New("Error loading file"));
+  }
+  char* data = ReadChars(*filename, &length);
+  if (data == NULL) {
+    return ThrowException(String::New("Error reading file"));
+  }
+
+  Handle<Object> buffer = Object::New();
+  buffer->Set(String::New(kArrayBufferMarkerPropName), True(), ReadOnly);
+
+  Persistent<Object> persistent_buffer = Persistent<Object>::New(buffer);
+  persistent_buffer.MakeWeak(data, ExternalArrayWeakCallback);
+  persistent_buffer.MarkIndependent();
+
+  buffer->SetIndexedPropertiesToExternalArrayData(
+      reinterpret_cast<uint8_t*>(data), kExternalUnsignedByteArray, length);
+  buffer->Set(String::New("byteLength"),
+              Int32::New(static_cast<int32_t>(length)), ReadOnly);
+  return buffer;
+}
+
+
 #ifndef V8_SHARED
 static char* ReadToken(char* data, char token) {
   char* next = i::OS::StrChr(data, token);
index c872f90..23fdebc 100644
@@ -308,6 +308,7 @@ class Shell : public i::AllStatic {
   static Handle<Value> DisableProfiler(const Arguments& args);
   static Handle<Value> Read(const Arguments& args);
   static Handle<Value> ReadBinary(const Arguments& args);
+  static Handle<Value> ReadBuffer(const Arguments& args);
   static Handle<String> ReadFromStdin();
   static Handle<Value> ReadLine(const Arguments& args) {
     return ReadFromStdin();
index 511663d..10c0053 100644
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -247,7 +247,7 @@ SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
     while (!(c == '\n' && prev_c == '\r')) {
       prev_c = c;
       received = conn->Receive(&c, 1);
-      if (received <= 0) {
+      if (received == 0) {
         PrintF("Error %d\n", Socket::LastError());
         return SmartArrayPointer<char>();
       }
@@ -454,7 +454,7 @@ int DebuggerAgentUtil::ReceiveAll(const Socket* conn, char* data, int len) {
   int total_received = 0;
   while (total_received < len) {
     int received = conn->Receive(data + total_received, len - total_received);
-    if (received <= 0) {
+    if (received == 0) {
       return total_received;
     }
     total_received += received;
index 3a58bda..91838e8 100644
@@ -449,11 +449,6 @@ ScriptBreakPoint.prototype.set = function (script) {
     actual_position = position;
   }
   var actual_location = script.locationFromPosition(actual_position, true);
-  // Check for any relocation and compare it with the breakpoint_relocation flag
-  if (actual_location.line != line && !%AllowBreakPointRelocation()) {
-    %ClearBreakPoint(break_point);
-    return;
-  }
   break_point.actual_location = { line: actual_location.line,
                                   column: actual_location.column,
                                   script_id: script.id };
@@ -1962,7 +1957,7 @@ DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
   if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
     frame_index = request.arguments.frameNumber;
     if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
-      return response.failed('Invalid frame number');
+      throw new Error('Invalid frame number');
     }
     return this.exec_state_.frame(frame_index);
   } else {
@@ -1971,20 +1966,44 @@ DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
 };
 
 
-DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
-  // No frames no scopes.
-  if (this.exec_state_.frameCount() == 0) {
-    return response.failed('No scopes');
+// Gets the scope holder object for a request. It is either a function
+// (the 'functionHandle' argument must be specified) or a stack frame
+// ('frameNumber' may be specified; the current frame is taken by default).
+DebugCommandProcessor.prototype.scopeHolderForScopeRequest_ =
+    function(request) {
+  if (request.arguments && "functionHandle" in request.arguments) {
+    if (!IS_NUMBER(request.arguments.functionHandle)) {
+      throw new Error('Function handle must be a number');
+    }
+    var function_mirror = LookupMirror(request.arguments.functionHandle);
+    if (!function_mirror) {
+      throw new Error('Failed to find function object by handle');
+    }
+    if (!function_mirror.isFunction()) {
+      throw new Error('Value of non-function type is found by handle');
+    }
+    return function_mirror;
+  } else {
+    // No frames, no scopes.
+    if (this.exec_state_.frameCount() == 0) {
+      throw new Error('No scopes');
+    }
+
+    // Get the frame for which the scopes are requested.
+    var frame = this.frameForScopeRequest_(request);
+    return frame;
   }
+};
 
-  // Get the frame for which the scopes are requested.
-  var frame = this.frameForScopeRequest_(request);
 
-  // Fill all scopes for this frame.
-  var total_scopes = frame.scopeCount();
+DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
+  var scope_holder = this.scopeHolderForScopeRequest_(request);
+
+  // Fill all scopes for this frame or function.
+  var total_scopes = scope_holder.scopeCount();
   var scopes = [];
   for (var i = 0; i < total_scopes; i++) {
-    scopes.push(frame.scope(i));
+    scopes.push(scope_holder.scope(i));
   }
   response.body = {
     fromScope: 0,
@@ -1996,24 +2015,19 @@ DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
 
 
 DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
-  // No frames no scopes.
-  if (this.exec_state_.frameCount() == 0) {
-    return response.failed('No scopes');
-  }
-
-  // Get the frame for which the scope is requested.
-  var frame = this.frameForScopeRequest_(request);
+  // Get the frame or function for which the scope is requested.
+  var scope_holder = this.scopeHolderForScopeRequest_(request);
 
   // With no scope argument just return top scope.
   var scope_index = 0;
   if (request.arguments && !IS_UNDEFINED(request.arguments.number)) {
     scope_index = %ToNumber(request.arguments.number);
-    if (scope_index < 0 || frame.scopeCount() <= scope_index) {
+    if (scope_index < 0 || scope_holder.scopeCount() <= scope_index) {
       return response.failed('Invalid scope number');
     }
   }
 
-  response.body = frame.scope(scope_index);
+  response.body = scope_holder.scope(scope_index);
 };
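
With scopeHolderForScopeRequest_ in place, a scopes or scope request may target a function rather than a stack frame. A request of roughly this shape would exercise the new path (field names follow the debugger's existing JSON protocol; the seq and handle values are invented for illustration):

    { "seq": 117, "type": "request", "command": "scopes",
      "arguments": { "functionHandle": 31 } }
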
 
 
index 99256ba..9efb5c3 100644
@@ -892,6 +892,16 @@ void Debug::Iterate(ObjectVisitor* v) {
 }
 
 
+void Debug::PutValuesOnStackAndDie(int start,
+                                   Address c_entry_fp,
+                                   Address last_fp,
+                                   Address larger_fp,
+                                   int count,
+                                   int end) {
+  OS::Abort();
+}
+
+
 Object* Debug::Break(Arguments args) {
   Heap* heap = isolate_->heap();
   HandleScope scope(isolate_);
@@ -984,11 +994,34 @@ Object* Debug::Break(Arguments args) {
       // Count frames until target frame
       int count = 0;
       JavaScriptFrameIterator it(isolate_);
-      while (!it.done() && it.frame()->fp() != thread_local_.last_fp_) {
+      while (!it.done() && it.frame()->fp() < thread_local_.last_fp_) {
         count++;
         it.Advance();
       }
 
+      // Catch the cases that would lead to crashes and capture
+      // - C entry FP at which to start stack crawl.
+      // - FP of the frame at which we plan to stop stepping out (last FP).
+      // - current FP that's larger than last FP.
+      // - Counter for the number of steps to step out.
+      if (it.done()) {
+        // We crawled the entire stack, never reaching last_fp_.
+        PutValuesOnStackAndDie(0xBEEEEEEE,
+                               frame->fp(),
+                               thread_local_.last_fp_,
+                               NULL,
+                               count,
+                               0xFEEEEEEE);
+      } else if (it.frame()->fp() != thread_local_.last_fp_) {
+        // We crawled over last_fp_, without getting a match.
+        PutValuesOnStackAndDie(0xBEEEEEEE,
+                               frame->fp(),
+                               thread_local_.last_fp_,
+                               it.frame()->fp(),
+                               count,
+                               0xFEEEEEEE);
+      }
+
       // If we found the original frame
       if (it.frame()->fp() == thread_local_.last_fp_) {
         if (step_count > 1) {
@@ -2227,6 +2260,13 @@ void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
 }
 
 
+const int Debug::FramePaddingLayout::kInitialSize = 1;
+
+
+// Any even value bigger than kInitialSize, as needed for stack scanning.
+const int Debug::FramePaddingLayout::kPaddingValue = kInitialSize + 1;
+
+
 bool Debug::IsDebugGlobal(GlobalObject* global) {
   return IsLoaded() && global == debug_context()->global();
 }
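
PutValuesOnStackAndDie is a crash-forensics breadcrumb: because it is declared NO_INLINE (see debug.h below), the caller must actually materialize the arguments, so the frame pointers and the step counter, bracketed by the greppable sentinels 0xBEEEEEEE and 0xFEEEEEEE, can be recovered from a minidump after OS::Abort(). The general pattern outside V8 (hypothetical names; whether the values end up in registers or on the stack is ABI-dependent):

    #include <cstdlib>

    // Breadcrumbs before abort: a non-inlined callee forces the values to
    // be live at the crash site, framed by easy-to-spot sentinel markers.
    #if defined(__GNUC__)
    __attribute__((noinline))
    #endif
    void DieWithBreadcrumbs(int start_marker, void* value_a, void* value_b,
                            int end_marker) {
      abort();  // start_marker .. end_marker bracket the captured values
    }

    // Usage, mirroring the sentinels above:
    //   DieWithBreadcrumbs(0xBEEEEEEE, fp_seen, fp_expected, 0xFEEEEEEE);
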
index 474b90b..d9c966c 100644
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -232,6 +232,12 @@ class Debug {
   void PreemptionWhileInDebugger();
   void Iterate(ObjectVisitor* v);
 
+  NO_INLINE(void PutValuesOnStackAndDie(int start,
+                                        Address c_entry_fp,
+                                        Address last_fp,
+                                        Address larger_fp,
+                                        int count,
+                                        int end));
   Object* Break(Arguments args);
   void SetBreakPoint(Handle<SharedFunctionInfo> shared,
                      Handle<Object> break_point_object,
@@ -245,6 +251,8 @@ class Debug {
   bool IsBreakOnException(ExceptionBreakType type);
   void PrepareStep(StepAction step_action, int step_count);
   void ClearStepping();
+  void ClearStepOut();
+  bool IsStepping() { return thread_local_.step_count_ > 0; }
   bool StepNextContinue(BreakLocationIterator* break_location_iterator,
                         JavaScriptFrame* frame);
   static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
@@ -455,6 +463,50 @@ class Debug {
   // Architecture-specific constant.
   static const bool kFrameDropperSupported;
 
+  /**
+   * Defines the layout of a stack frame that supports padding. This is a
+   * regular internal frame with a flexible stack structure. LiveEdit can
+   * shift its lower part up the stack, taking up the 'padding' space when
+   * additional stack memory is required.
+   * Such a frame is expected immediately above the topmost JavaScript frame.
+   *
+   * Stack Layout:
+   *   --- Top
+   *   LiveEdit routine frames
+   *   ---
+   *   C frames of debug handler
+   *   ---
+   *   ...
+   *   ---
+   *      An internal frame that has n padding words:
+   *      - any number of words as needed by code -- upper part of frame
+   *      - padding size: a Smi storing n -- current size of padding
+   *      - padding: n words filled with kPaddingValue in form of Smi
+   *      - 3 context/type words of a regular InternalFrame
+   *      - fp
+   *   ---
+   *      Topmost JavaScript frame
+   *   ---
+   *   ...
+   *   --- Bottom
+   */
+  class FramePaddingLayout : public AllStatic {
+   public:
+    // Architecture-specific constant.
+    static const bool kIsSupported;
+
+    // The size of the frame base, including fp. Padding words start right
+    // above the base.
+    static const int kFrameBaseSize = 4;
+
+    // The number of words that should be reserved on the stack for
+    // LiveEdit's use. Normally equals 1. Stored on the stack as a Smi.
+    static const int kInitialSize;
+    // The value that padding words are filled with (as a Smi). Scanning
+    // bottom to top, the first word not holding this value is the counter.
+    static const int kPaddingValue;
+  };
+
  private:
   explicit Debug(Isolate* isolate);
   ~Debug();
@@ -464,7 +516,6 @@ class Debug {
   void ActivateStepIn(StackFrame* frame);
   void ClearStepIn();
   void ActivateStepOut(StackFrame* frame);
-  void ClearStepOut();
   void ClearStepNext();
   // Returns whether the compile succeeded.
   void RemoveDebugInfo(Handle<DebugInfo> debug_info);
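
Given kInitialSize and kPaddingValue from debug.cc above, the scan the FramePaddingLayout comment describes is simple: walk the words above the frame base, skip everything equal to the padding Smi, and the first other word is the counter holding the current padding size. A standalone sketch (assumptions: 32-bit Smis encoded as value << 1, and walking toward higher addresses; both are illustration choices, not quoted V8 code):

    #include <stdint.h>

    static const intptr_t kPaddingSmi = 2 << 1;  // kPaddingValue as a Smi

    // 'slot' points at the first word above the fixed frame base.
    int CurrentPaddingSize(const intptr_t* slot) {
      while (*slot == kPaddingSmi) ++slot;   // skip padding words
      return static_cast<int>(*slot >> 1);   // decode the counter Smi
    }
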
index 16a3245..fcf6906 100644
@@ -130,12 +130,6 @@ class Double {
     return (d64 & kExponentMask) == kExponentMask;
   }
 
-  bool IsNan() const {
-    uint64_t d64 = AsUint64();
-    return ((d64 & kExponentMask) == kExponentMask) &&
-        ((d64 & kSignificandMask) != 0);
-  }
-
   bool IsInfinite() const {
     uint64_t d64 = AsUint64();
     return ((d64 & kExponentMask) == kExponentMask) &&
index 26d3dc1..d367af8 100644
@@ -424,10 +424,10 @@ class ElementsAccessorBase : public ElementsAccessor {
         receiver, holder, key, BackingStore::cast(backing_store));
   }
 
-  virtual MaybeObject* Get(Object* receiver,
-                           JSObject* holder,
-                           uint32_t key,
-                           FixedArrayBase* backing_store) {
+  MUST_USE_RESULT virtual MaybeObject* Get(Object* receiver,
+                                           JSObject* holder,
+                                           uint32_t key,
+                                           FixedArrayBase* backing_store) {
     if (backing_store == NULL) {
       backing_store = holder->elements();
     }
@@ -435,62 +435,64 @@ class ElementsAccessorBase : public ElementsAccessor {
         receiver, holder, key, BackingStore::cast(backing_store));
   }
 
-  static MaybeObject* GetImpl(Object* receiver,
-                              JSObject* obj,
-                              uint32_t key,
-                              BackingStore* backing_store) {
+  MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
+                                              JSObject* obj,
+                                              uint32_t key,
+                                              BackingStore* backing_store) {
     return (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store))
            ? backing_store->get(key)
            : backing_store->GetHeap()->the_hole_value();
   }
 
-  virtual MaybeObject* SetLength(JSArray* array,
-                                 Object* length) {
+  MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* array,
+                                                 Object* length) {
     return ElementsAccessorSubclass::SetLengthImpl(
         array, length, BackingStore::cast(array->elements()));
   }
 
-  static MaybeObject* SetLengthImpl(JSObject* obj,
-                                    Object* length,
-                                    BackingStore* backing_store);
+  MUST_USE_RESULT static MaybeObject* SetLengthImpl(
+      JSObject* obj,
+      Object* length,
+      BackingStore* backing_store);
 
-  virtual MaybeObject* SetCapacityAndLength(JSArray* array,
-                                            int capacity,
-                                            int length) {
+  MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(JSArray* array,
+                                                            int capacity,
+                                                            int length) {
     return ElementsAccessorSubclass::SetFastElementsCapacityAndLength(
         array,
         capacity,
         length);
   }
 
-  static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
-                                                       int capacity,
-                                                       int length) {
+  MUST_USE_RESULT static MaybeObject* SetFastElementsCapacityAndLength(
+      JSObject* obj,
+      int capacity,
+      int length) {
     UNIMPLEMENTED();
     return obj;
   }
 
-  virtual MaybeObject* Delete(JSObject* obj,
-                              uint32_t key,
-                              JSReceiver::DeleteMode mode) = 0;
+  MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
+                                              uint32_t key,
+                                              JSReceiver::DeleteMode mode) = 0;
 
-  static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
-                                       uint32_t from_start,
-                                       FixedArrayBase* to,
-                                       ElementsKind to_kind,
-                                       uint32_t to_start,
-                                       int copy_size) {
+  MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
+                                                       uint32_t from_start,
+                                                       FixedArrayBase* to,
+                                                       ElementsKind to_kind,
+                                                       uint32_t to_start,
+                                                       int copy_size) {
     UNREACHABLE();
     return NULL;
   }
 
-  virtual MaybeObject* CopyElements(JSObject* from_holder,
-                                    uint32_t from_start,
-                                    FixedArrayBase* to,
-                                    ElementsKind to_kind,
-                                    uint32_t to_start,
-                                    int copy_size,
-                                    FixedArrayBase* from) {
+  MUST_USE_RESULT virtual MaybeObject* CopyElements(JSObject* from_holder,
+                                                    uint32_t from_start,
+                                                    FixedArrayBase* to,
+                                                    ElementsKind to_kind,
+                                                    uint32_t to_start,
+                                                    int copy_size,
+                                                    FixedArrayBase* from) {
     if (from == NULL) {
       from = from_holder->elements();
     }
@@ -501,10 +503,11 @@ class ElementsAccessorBase : public ElementsAccessor {
         from, from_start, to, to_kind, to_start, copy_size);
   }
 
-  virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
-                                               JSObject* holder,
-                                               FixedArray* to,
-                                               FixedArrayBase* from) {
+  MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
+      Object* receiver,
+      JSObject* holder,
+      FixedArray* to,
+      FixedArrayBase* from) {
     int len0 = to->length();
 #ifdef DEBUG
     if (FLAG_enable_slow_asserts) {
@@ -866,27 +869,28 @@ class ExternalElementsAccessor
   friend class ElementsAccessorBase<ExternalElementsAccessorSubclass,
                                     ElementsKindTraits<Kind> >;
 
-  static MaybeObject* GetImpl(Object* receiver,
-                              JSObject* obj,
-                              uint32_t key,
-                              BackingStore* backing_store) {
+  MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
+                                              JSObject* obj,
+                                              uint32_t key,
+                                              BackingStore* backing_store) {
     return
         key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
         ? backing_store->get(key)
         : backing_store->GetHeap()->undefined_value();
   }
 
-  static MaybeObject* SetLengthImpl(JSObject* obj,
-                                    Object* length,
-                                    BackingStore* backing_store) {
+  MUST_USE_RESULT static MaybeObject* SetLengthImpl(
+      JSObject* obj,
+      Object* length,
+      BackingStore* backing_store) {
     // External arrays do not support changing their length.
     UNREACHABLE();
     return obj;
   }
 
-  virtual MaybeObject* Delete(JSObject* obj,
-                              uint32_t key,
-                              JSReceiver::DeleteMode mode) {
+  MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
+                                              uint32_t key,
+                                              JSReceiver::DeleteMode mode) {
     // External arrays always ignore deletes.
     return obj->GetHeap()->true_value();
   }
@@ -1002,10 +1006,11 @@ class DictionaryElementsAccessor
 
   // Adjusts the length of the dictionary backing store and returns the new
   // length according to ES5 section 15.4.5.2 behavior.
-  static MaybeObject* SetLengthWithoutNormalize(SeededNumberDictionary* dict,
-                                                JSArray* array,
-                                                Object* length_object,
-                                                uint32_t length) {
+  MUST_USE_RESULT static MaybeObject* SetLengthWithoutNormalize(
+      SeededNumberDictionary* dict,
+      JSArray* array,
+      Object* length_object,
+      uint32_t length) {
     if (length == 0) {
       // If the length of a slow array is reset to zero, we clear
       // the array and flush backing storage. This has the added
@@ -1057,9 +1062,10 @@ class DictionaryElementsAccessor
     return length_object;
   }
 
-  static MaybeObject* DeleteCommon(JSObject* obj,
-                                   uint32_t key,
-                                   JSReceiver::DeleteMode mode) {
+  MUST_USE_RESULT static MaybeObject* DeleteCommon(
+      JSObject* obj,
+      uint32_t key,
+      JSReceiver::DeleteMode mode) {
     Isolate* isolate = obj->GetIsolate();
     Heap* heap = isolate->heap();
     FixedArray* backing_store = FixedArray::cast(obj->elements());
@@ -1102,12 +1108,12 @@ class DictionaryElementsAccessor
     return heap->true_value();
   }
 
-  static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
-                                       uint32_t from_start,
-                                       FixedArrayBase* to,
-                                       ElementsKind to_kind,
-                                       uint32_t to_start,
-                                       int copy_size) {
+  MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
+                                                       uint32_t from_start,
+                                                       FixedArrayBase* to,
+                                                       ElementsKind to_kind,
+                                                       uint32_t to_start,
+                                                       int copy_size) {
     switch (to_kind) {
       case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
@@ -1131,16 +1137,17 @@ class DictionaryElementsAccessor
   friend class ElementsAccessorBase<DictionaryElementsAccessor,
                                     ElementsKindTraits<DICTIONARY_ELEMENTS> >;
 
-  virtual MaybeObject* Delete(JSObject* obj,
-                              uint32_t key,
-                              JSReceiver::DeleteMode mode) {
+  MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
+                                              uint32_t key,
+                                              JSReceiver::DeleteMode mode) {
     return DeleteCommon(obj, key, mode);
   }
 
-  static MaybeObject* GetImpl(Object* receiver,
-                              JSObject* obj,
-                              uint32_t key,
-                              SeededNumberDictionary* backing_store) {
+  MUST_USE_RESULT static MaybeObject* GetImpl(
+      Object* receiver,
+      JSObject* obj,
+      uint32_t key,
+      SeededNumberDictionary* backing_store) {
     int entry = backing_store->FindEntry(key);
     if (entry != SeededNumberDictionary::kNotFound) {
       Object* element = backing_store->ValueAt(entry);
@@ -1186,10 +1193,10 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
       NonStrictArgumentsElementsAccessor,
       ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >;
 
-  static MaybeObject* GetImpl(Object* receiver,
-                              JSObject* obj,
-                              uint32_t key,
-                              FixedArray* parameter_map) {
+  MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
+                                              JSObject* obj,
+                                              uint32_t key,
+                                              FixedArray* parameter_map) {
     Object* probe = GetParameterMapArg(obj, parameter_map, key);
     if (!probe->IsTheHole()) {
       Context* context = Context::cast(parameter_map->get(0));
@@ -1216,18 +1223,19 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
     }
   }
 
-  static MaybeObject* SetLengthImpl(JSObject* obj,
-                                    Object* length,
-                                    FixedArray* parameter_map) {
+  MUST_USE_RESULT static MaybeObject* SetLengthImpl(
+      JSObject* obj,
+      Object* length,
+      FixedArray* parameter_map) {
     // TODO(mstarzinger): This was never implemented but will be used once we
     // correctly implement [[DefineOwnProperty]] on arrays.
     UNIMPLEMENTED();
     return obj;
   }
 
-  virtual MaybeObject* Delete(JSObject* obj,
-                              uint32_t key,
-                              JSReceiver::DeleteMode mode) {
+  MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* obj,
+                                              uint32_t key,
+                                              JSReceiver::DeleteMode mode) {
     FixedArray* parameter_map = FixedArray::cast(obj->elements());
     Object* probe = GetParameterMapArg(obj, parameter_map, key);
     if (!probe->IsTheHole()) {
@@ -1246,12 +1254,12 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
     return obj->GetHeap()->true_value();
   }
 
-  static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
-                                       uint32_t from_start,
-                                       FixedArrayBase* to,
-                                       ElementsKind to_kind,
-                                       uint32_t to_start,
-                                       int copy_size) {
+  MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
+                                                       uint32_t from_start,
+                                                       FixedArrayBase* to,
+                                                       ElementsKind to_kind,
+                                                       uint32_t to_start,
+                                                       int copy_size) {
     FixedArray* parameter_map = FixedArray::cast(from);
     FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
     ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
@@ -1354,8 +1362,8 @@ void ElementsAccessor::TearDown() {
 
 
 template <typename ElementsAccessorSubclass, typename ElementsKindTraits>
-MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
-                                  ElementsKindTraits>::
+MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
+                                                  ElementsKindTraits>::
     SetLengthImpl(JSObject* obj,
                   Object* length,
                   typename ElementsKindTraits::BackingStore* backing_store) {
index 51d402d..55d6fa5 100644
@@ -60,18 +60,19 @@ class ElementsAccessor {
   // can optionally pass in the backing store to use for the check, which must
   // be compatible with the ElementsKind of the ElementsAccessor. If
   // backing_store is NULL, holder->elements() is used as the backing store.
-  virtual MaybeObject* Get(Object* receiver,
-                           JSObject* holder,
-                           uint32_t key,
-                           FixedArrayBase* backing_store = NULL) = 0;
+  MUST_USE_RESULT virtual MaybeObject* Get(
+      Object* receiver,
+      JSObject* holder,
+      uint32_t key,
+      FixedArrayBase* backing_store = NULL) = 0;
 
   // Modifies the length data property as specified for JSArrays and resizes the
   // underlying backing store accordingly. The method honors the semantics of
   // changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. arrays
   // that have non-deletable elements can only be shrunk to the size of the
   // highest element that is non-deletable.
-  virtual MaybeObject* SetLength(JSArray* holder,
-                                 Object* new_length) = 0;
+  MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* holder,
+                                                 Object* new_length) = 0;
 
   // Modifies both the length and capacity of a JSArray, resizing the underlying
   // backing store as necessary. This method does NOT honor the semantics of
@@ -79,14 +80,14 @@ class ElementsAccessor {
   // elements. This method should only be called for array expansion OR by
   // runtime JavaScript code that uses InternalArrays and doesn't care about
   // EcmaScript 5.1 semantics.
-  virtual MaybeObject* SetCapacityAndLength(JSArray* array,
-                                            int capacity,
-                                            int length) = 0;
+  MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(JSArray* array,
+                                                            int capacity,
+                                                            int length) = 0;
 
   // Deletes an element in an object, returning a new elements backing store.
-  virtual MaybeObject* Delete(JSObject* holder,
-                              uint32_t key,
-                              JSReceiver::DeleteMode mode) = 0;
+  MUST_USE_RESULT virtual MaybeObject* Delete(JSObject* holder,
+                                              uint32_t key,
+                                              JSReceiver::DeleteMode mode) = 0;
 
   // If kCopyToEnd is specified as the copy_size to CopyElements, it copies all
   // of the elements from source after source_start to the destination array.
@@ -101,26 +102,28 @@ class ElementsAccessor {
   // the source JSObject or JSArray in source_holder. If the holder's backing
   // store is available, it can be passed in source and source_holder is
   // ignored.
-  virtual MaybeObject* CopyElements(JSObject* source_holder,
-                                    uint32_t source_start,
-                                    FixedArrayBase* destination,
-                                    ElementsKind destination_kind,
-                                    uint32_t destination_start,
-                                    int copy_size,
-                                    FixedArrayBase* source = NULL) = 0;
-
-  MaybeObject* CopyElements(JSObject* from_holder,
-                            FixedArrayBase* to,
-                            ElementsKind to_kind,
-                            FixedArrayBase* from = NULL) {
+  MUST_USE_RESULT virtual MaybeObject* CopyElements(
+      JSObject* source_holder,
+      uint32_t source_start,
+      FixedArrayBase* destination,
+      ElementsKind destination_kind,
+      uint32_t destination_start,
+      int copy_size,
+      FixedArrayBase* source = NULL) = 0;
+
+  MUST_USE_RESULT MaybeObject* CopyElements(JSObject* from_holder,
+                                            FixedArrayBase* to,
+                                            ElementsKind to_kind,
+                                            FixedArrayBase* from = NULL) {
     return CopyElements(from_holder, 0, to, to_kind, 0,
                         kCopyToEndAndInitializeToHole, from);
   }
 
-  virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
-                                               JSObject* holder,
-                                               FixedArray* to,
-                                               FixedArrayBase* from = NULL) = 0;
+  MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
+      Object* receiver,
+      JSObject* holder,
+      FixedArray* to,
+      FixedArrayBase* from = NULL) = 0;
 
   // Returns a shared ElementsAccessor for the specified ElementsKind.
   static ElementsAccessor* ForKind(ElementsKind elements_kind) {
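
The sweep through elements.cc and elements.h adds MUST_USE_RESULT to every MaybeObject*-returning entry point. On GCC-style compilers that macro usually expands to the warn_unused_result attribute (its customary definition in V8's globals.h, recalled here rather than quoted from the patch), so a dropped MaybeObject* -- and with it a swallowed allocation failure -- becomes a compile-time warning:

    // Approximate definition and effect; a sketch, not the exact macro.
    #if defined(__GNUC__)
    #define MUST_USE_RESULT __attribute__ ((warn_unused_result))
    #else
    #define MUST_USE_RESULT
    #endif

    MUST_USE_RESULT int MightFail() { return 0; }

    int main() {
      MightFail();                      // warning: ignoring return value
      return MightFail() == 0 ? 0 : 1;  // fine: the result is consumed
    }
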
index 7e69abe..5618975 100644
@@ -71,8 +71,7 @@ static Handle<Object> Invoke(bool is_construct,
                              Handle<Object> receiver,
                              int argc,
                              Handle<Object> args[],
-                             bool* has_pending_exception,
-                             Handle<Object> qml) {
+                             bool* has_pending_exception) {
   Isolate* isolate = function->GetIsolate();
 
   // Entering JavaScript.
@@ -103,12 +102,6 @@ static Handle<Object> Invoke(bool is_construct,
   // make the current one is indeed a global object.
   ASSERT(function->context()->global()->IsGlobalObject());
 
-  Handle<JSObject> oldqml;
-  if (!qml.is_null()) {
-    oldqml = Handle<JSObject>(function->context()->qml_global());
-    function->context()->set_qml_global(JSObject::cast(*qml));
-  }
-
   {
     // Save and restore context around invocation and block the
     // allocation of handles without explicit handle scopes.
@@ -125,9 +118,6 @@ static Handle<Object> Invoke(bool is_construct,
         CALL_GENERATED_CODE(stub_entry, function_entry, func, recv, argc, argv);
   }
 
-  if (!qml.is_null())
-    function->context()->set_qml_global(*oldqml);
-
 #ifdef DEBUG
   value->Verify();
 #endif
@@ -156,18 +146,7 @@ Handle<Object> Execution::Call(Handle<Object> callable,
                                int argc,
                                Handle<Object> argv[],
                                bool* pending_exception,
-                               bool convert_receiver)
-{
-    return Call(callable, receiver, argc, argv, pending_exception, convert_receiver, Handle<Object>());
-}
-
-Handle<Object> Execution::Call(Handle<Object> callable,
-                               Handle<Object> receiver,
-                               int argc,
-                               Handle<Object> argv[],
-                               bool* pending_exception,
-                               bool convert_receiver,
-                               Handle<Object> qml) {
+                               bool convert_receiver) {
   *pending_exception = false;
 
   if (!callable->IsJSFunction()) {
@@ -191,7 +170,7 @@ Handle<Object> Execution::Call(Handle<Object> callable,
     if (*pending_exception) return callable;
   }
 
-  return Invoke(false, func, receiver, argc, argv, pending_exception, qml);
+  return Invoke(false, func, receiver, argc, argv, pending_exception);
 }
 
 
@@ -200,7 +179,7 @@ Handle<Object> Execution::New(Handle<JSFunction> func,
                               Handle<Object> argv[],
                               bool* pending_exception) {
   return Invoke(true, func, Isolate::Current()->global(), argc, argv,
-                pending_exception, Handle<Object>());
+                pending_exception);
 }
 
 
@@ -219,7 +198,7 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
   *caught_exception = false;
 
   Handle<Object> result = Invoke(false, func, receiver, argc, args,
-                                 caught_exception, Handle<Object>());
+                                 caught_exception);
 
   if (*caught_exception) {
     ASSERT(catcher.HasCaught());
index c33a675..01e4b9d 100644
@@ -69,14 +69,6 @@ class Execution : public AllStatic {
                              bool* pending_exception,
                              bool convert_receiver = false);
 
-  static Handle<Object> Call(Handle<Object> callable,
-                             Handle<Object> receiver,
-                             int argc,
-                             Handle<Object> argv[],
-                             bool* pending_exception,
-                             bool convert_receiver,
-                             Handle<Object> qml);
-
   // Construct an object from a function; the caller supplies an array of
   // arguments. Arguments are Object* type. After the function returns,
   // pointers in 'args' might be invalid.
index ab0bb1f..6bb7893 100644
@@ -291,6 +291,15 @@ Handle<Context> Factory::NewGlobalContext() {
 }
 
 
+Handle<Context> Factory::NewModuleContext(Handle<Context> previous,
+                                          Handle<ScopeInfo> scope_info) {
+  CALL_HEAP_FUNCTION(
+      isolate(),
+      isolate()->heap()->AllocateModuleContext(*previous, *scope_info),
+      Context);
+}
+
+
 Handle<Context> Factory::NewFunctionContext(int length,
                                             Handle<JSFunction> function) {
   CALL_HEAP_FUNCTION(
@@ -324,10 +333,9 @@ Handle<Context> Factory::NewWithContext(Handle<JSFunction> function,
 }
 
 
-Handle<Context> Factory::NewBlockContext(
-    Handle<JSFunction> function,
-    Handle<Context> previous,
-    Handle<ScopeInfo> scope_info) {
+Handle<Context> Factory::NewBlockContext(Handle<JSFunction> function,
+                                         Handle<Context> previous,
+                                         Handle<ScopeInfo> scope_info) {
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateBlockContext(*function,
@@ -928,6 +936,13 @@ Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
 }
 
 
+Handle<JSModule> Factory::NewJSModule() {
+  CALL_HEAP_FUNCTION(
+      isolate(),
+      isolate()->heap()->AllocateJSModule(), JSModule);
+}
+
+
 Handle<GlobalObject> Factory::NewGlobalObject(
     Handle<JSFunction> constructor) {
   CALL_HEAP_FUNCTION(isolate(),
@@ -1219,24 +1234,15 @@ Handle<JSFunction> Factory::CreateApiFunction(
   Handle<Code> construct_stub = isolate()->builtins()->JSConstructStubApi();
 
   int internal_field_count = 0;
-  bool has_external_resource = false;
-  bool use_user_object_comparison = false;
-
   if (!obj->instance_template()->IsUndefined()) {
     Handle<ObjectTemplateInfo> instance_template =
         Handle<ObjectTemplateInfo>(
             ObjectTemplateInfo::cast(obj->instance_template()));
     internal_field_count =
         Smi::cast(instance_template->internal_field_count())->value();
-    has_external_resource =
-        !instance_template->has_external_resource()->IsUndefined();
-    use_user_object_comparison =
-        !instance_template->use_user_object_comparison()->IsUndefined();
   }
 
   int instance_size = kPointerSize * internal_field_count;
-  if (has_external_resource) instance_size += kPointerSize;
-
   InstanceType type = INVALID_TYPE;
   switch (instance_type) {
     case JavaScriptObject:
@@ -1271,16 +1277,6 @@ Handle<JSFunction> Factory::CreateApiFunction(
 
   Handle<Map> map = Handle<Map>(result->initial_map());
 
-  // Mark as having external data object if needed
-  if (has_external_resource) {
-    map->set_has_external_resource(true);
-  }
-
-  // Mark as using user object comparison if needed
-  if (use_user_object_comparison) {
-    map->set_use_user_object_comparison(true);
-  }
-
   // Mark as undetectable if needed.
   if (obj->undetectable()) {
     map->set_is_undetectable();
@@ -1299,10 +1295,6 @@ Handle<JSFunction> Factory::CreateApiFunction(
   // Set interceptor information in the map.
   if (!obj->named_property_handler()->IsUndefined()) {
     map->set_has_named_interceptor();
-    InterceptorInfo *nph = InterceptorInfo::cast(obj->named_property_handler());
-    bool is_fallback =
-        nph->is_fallback()->IsUndefined()?false:nph->is_fallback()->value();
-    map->set_named_interceptor_is_fallback(is_fallback);
   }
   if (!obj->indexed_property_handler()->IsUndefined()) {
     map->set_has_indexed_interceptor();
index 786d4a9..06aad1b 100644
@@ -162,9 +162,12 @@ class Factory {
   // Create a global (but otherwise uninitialized) context.
   Handle<Context> NewGlobalContext();
 
+  // Create a module context.
+  Handle<Context> NewModuleContext(Handle<Context> previous,
+                                   Handle<ScopeInfo> scope_info);
+
   // Create a function context.
-  Handle<Context> NewFunctionContext(int length,
-                                     Handle<JSFunction> function);
+  Handle<Context> NewFunctionContext(int length, Handle<JSFunction> function);
 
   // Create a catch context.
   Handle<Context> NewCatchContext(Handle<JSFunction> function,
@@ -177,7 +180,7 @@ class Factory {
                                  Handle<Context> previous,
                                  Handle<JSObject> extension);
 
-  // Create a 'block' context.
+  // Create a block context.
   Handle<Context> NewBlockContext(Handle<JSFunction> function,
                                   Handle<Context> previous,
                                   Handle<ScopeInfo> scope_info);
@@ -262,6 +265,9 @@ class Factory {
   // runtime.
   Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);
 
+  // JS modules are pretenured.
+  Handle<JSModule> NewJSModule();
+
   // JS arrays are pretenured when allocated by the parser.
   Handle<JSArray> NewJSArray(int capacity,
                              ElementsKind elements_kind = FAST_ELEMENTS,
index 904533f..ccba511 100644
@@ -132,6 +132,8 @@ public:
 
 // Flags for language modes and experimental language features.
 DEFINE_bool(use_strict, false, "enforce strict mode")
+DEFINE_bool(es52_globals, false,
+            "activate new semantics for global var declarations")
 
 DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
 DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
@@ -165,7 +167,12 @@ DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis")
 DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
 DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
 DEFINE_bool(use_inlining, true, "use function inlining")
-DEFINE_bool(limit_inlining, true, "limit code size growth from inlining")
+DEFINE_int(max_inlined_source_size, 600,
+           "maximum source size in bytes considered for a single inlining")
+DEFINE_int(max_inlined_nodes, 196,
+           "maximum number of AST nodes considered for a single inlining")
+DEFINE_int(max_inlined_nodes_cumulative, 196,
+           "maximum cumulative number of AST nodes considered for inlining")
 DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
 DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
             true,
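The old all-or-nothing limit_inlining switch becomes three explicit budgets. A minimal sketch of how such limits might compose, assuming a per-compilation cumulative counter; the flag names and defaults come from the hunk above, but the decision function is illustrative, not V8's actual heuristic:

    #include <cstdio>

    // Hypothetical budget mirroring the new flags; the logic is a sketch only.
    struct InlineBudget {
      int max_source_size;       // --max_inlined_source_size (default 600)
      int max_nodes;             // --max_inlined_nodes (default 196)
      int max_nodes_cumulative;  // --max_inlined_nodes_cumulative (default 196)
      int nodes_inlined;         // running total for this compilation

      bool Allow(int source_size, int ast_nodes) {
        if (source_size > max_source_size) return false;  // callee too big
        if (ast_nodes > max_nodes) return false;          // per-site cap
        if (nodes_inlined + ast_nodes > max_nodes_cumulative) return false;
        nodes_inlined += ast_nodes;                       // charge the budget
        return true;
      }
    };

    int main() {
      InlineBudget budget = { 600, 196, 196, 0 };
      printf("%d %d\n", budget.Allow(120, 100), budget.Allow(120, 100));  // 1 0
    }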
@@ -188,6 +195,10 @@ DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
 DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
 DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
 DEFINE_bool(use_osr, true, "use on-stack replacement")
+DEFINE_bool(array_bounds_checks_elimination, true,
+            "perform array bounds checks elimination")
+DEFINE_bool(array_index_dehoisting, false,
+            "perform array index dehoisting")
 
 DEFINE_bool(trace_osr, false, "trace on-stack replacement")
 DEFINE_int(stress_runs, 0, "number of stress runs")
@@ -306,7 +317,6 @@ DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
 DEFINE_bool(debugger_auto_break, true,
             "automatically set the debug break flag when debugger commands are "
             "in the queue")
-DEFINE_bool(breakpoint_relocation, true, "relocate breakpoints to the next executable line")
 DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
 DEFINE_bool(break_on_abort, true, "always cause a debug break before aborting")
 
@@ -404,7 +414,7 @@ DEFINE_bool(trace_exception, false,
 DEFINE_bool(preallocate_message_memory, false,
             "preallocate some memory to build stack traces.")
 DEFINE_bool(randomize_hashes,
-            false,
+            true,
             "randomize hashes to avoid predictable hash collisions "
             "(with snapshots this option cannot override the baked-in seed)")
 DEFINE_int(hash_seed,
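Making randomize_hashes default to true means string hashes incorporate a per-process seed, so colliding keys cannot be precomputed offline. A standalone sketch of the idea using a Jenkins-style one-at-a-time hash (an assumption for illustration; this hunk does not show V8's actual mixing function):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Seeding the initial state makes bucket collisions differ per process.
    uint32_t SeededHash(const char* key, uint32_t seed) {
      uint32_t h = seed;
      for (size_t i = 0; i < strlen(key); ++i) {
        h += static_cast<unsigned char>(key[i]);
        h += h << 10;
        h ^= h >> 6;
      }
      h += h << 3;
      h ^= h >> 11;
      h += h << 15;
      return h;
    }

    int main() {
      // Same key, different seeds: unrelated hash values.
      printf("%08x %08x\n", SeededHash("key", 0), SeededHash("key", 0xdeadbeef));
    }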
index 5911284..e265341 100644 (file)
@@ -1359,12 +1359,7 @@ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
 // -------------------------------------------------------------------------
 
 int NumRegs(RegList reglist) {
-  int n = 0;
-  while (reglist != 0) {
-    n++;
-    reglist &= reglist - 1;  // clear one bit
-  }
-  return n;
+  return CompilerIntrinsics::CountSetBits(reglist);
 }
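The removed loop is Kernighan's bit-counting trick; routing it through CompilerIntrinsics::CountSetBits lets builds that have one use a hardware popcount instead. Both approaches side by side, assuming GCC/Clang's __builtin_popcount as the intrinsic:

    #include <cstdio>

    // Kernighan's loop: each iteration clears the lowest set bit.
    int CountSetBitsPortable(unsigned reglist) {
      int n = 0;
      while (reglist != 0) {
        n++;
        reglist &= reglist - 1;  // clear one bit
      }
      return n;
    }

    int main() {
      unsigned reglist = 0x2905;  // bits 0, 2, 8, 11, 13 set
      printf("%d %d\n", CountSetBitsPortable(reglist),
             __builtin_popcount(reglist));  // both print 5
    }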
 
 
index 7178bd4..78cdd0c 100644 (file)
@@ -211,6 +211,9 @@ class StackFrame BASE_EMBEDDED {
 
   virtual void SetCallerFp(Address caller_fp) = 0;
 
+  // Manually changes the value of fp in this object.
+  void UpdateFp(Address fp) { state_.fp = fp; }
+
   Address* pc_address() const { return state_.pc_address; }
 
   // Get the id of this stack frame.
index 522fd6f..9b1df4e 100644 (file)
@@ -314,7 +314,8 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
   Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
   Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
   code->set_optimizable(info->IsOptimizable() &&
-                        !info->function()->flags()->Contains(kDontOptimize));
+                        !info->function()->flags()->Contains(kDontOptimize) &&
+                        info->function()->scope()->AllowsLazyRecompilation());
   cgen.PopulateDeoptimizationData(code);
   cgen.PopulateTypeFeedbackInfo(code);
   cgen.PopulateTypeFeedbackCells(code);
@@ -568,89 +569,91 @@ void FullCodeGenerator::DoTest(const TestContext* context) {
 
 void FullCodeGenerator::VisitDeclarations(
     ZoneList<Declaration*>* declarations) {
-  int save_global_count = global_count_;
-  global_count_ = 0;
+  ZoneList<Handle<Object> >* saved_globals = globals_;
+  ZoneList<Handle<Object> > inner_globals(10);
+  globals_ = &inner_globals;
 
   AstVisitor::VisitDeclarations(declarations);
-
-  // Batch declare global functions and variables.
-  if (global_count_ > 0) {
-    Handle<FixedArray> array =
-       isolate()->factory()->NewFixedArray(3 * global_count_, TENURED);
-    int length = declarations->length();
-    for (int j = 0, i = 0; i < length; i++) {
-      Declaration* decl = declarations->at(i);
-      Variable* var = decl->proxy()->var();
-
-      if (var->IsUnallocated()) {
-        array->set(j++, *(var->name()));
-        FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration();
-        if (fun_decl == NULL) {
-          if (var->binding_needs_init()) {
-            // In case this binding needs initialization use the hole.
-            array->set_the_hole(j++);
-          } else {
-            array->set_undefined(j++);
-          }
-        } else {
-          Handle<SharedFunctionInfo> function =
-              Compiler::BuildFunctionInfo(fun_decl->fun(), script());
-          // Check for stack-overflow exception.
-          if (function.is_null()) {
-            SetStackOverflow();
-            return;
-          }
-          array->set(j++, *function);
-        }
-        array->set(j++, Smi::FromInt(var->is_qml_global()));
-      }
-    }
+  if (!globals_->is_empty()) {
     // Invoke the platform-dependent code generator to do the actual
     // declaration of the global functions and variables.
+    Handle<FixedArray> array =
+       isolate()->factory()->NewFixedArray(globals_->length(), TENURED);
+    for (int i = 0; i < globals_->length(); ++i)
+      array->set(i, *globals_->at(i));
     DeclareGlobals(array);
   }
 
-  global_count_ = save_global_count;
-}
-
-
-void FullCodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
-  EmitDeclaration(decl->proxy(), decl->mode(), NULL);
+  globals_ = saved_globals;
 }
 
 
-void FullCodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
-  EmitDeclaration(decl->proxy(), decl->mode(), decl->fun());
-}
-
-
-void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* decl) {
-  EmitDeclaration(decl->proxy(), decl->mode(), NULL);
-}
-
-
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* decl) {
-  EmitDeclaration(decl->proxy(), decl->mode(), NULL);
-}
+void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
+  Handle<JSModule> instance = module->interface()->Instance();
+  ASSERT(!instance.is_null());
 
+  // Allocate a module context statically.
+  Block* block = module->body();
+  Scope* saved_scope = scope();
+  scope_ = block->scope();
+  Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
+
+  // Generate code for module creation and linking.
+  Comment cmnt(masm_, "[ ModuleLiteral");
+  SetStatementPosition(block);
+
+  if (scope_info->HasContext()) {
+    // Set up module context.
+    __ Push(scope_info);
+    __ Push(instance);
+    __ CallRuntime(Runtime::kPushModuleContext, 2);
+    StoreToFrameField(
+        StandardFrameConstants::kContextOffset, context_register());
+  }
 
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* decl) {
-  // TODO(rossberg)
-}
+  {
+    Comment cmnt(masm_, "[ Declarations");
+    VisitDeclarations(scope_->declarations());
+  }
 
+  scope_ = saved_scope;
+  if (scope_info->HasContext()) {
+    // Pop module context.
+    LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+    // Update local stack frame context field.
+    StoreToFrameField(
+        StandardFrameConstants::kContextOffset, context_register());
+  }
 
-void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
-  // TODO(rossberg)
+  // Populate module instance object.
+  const PropertyAttributes attr =
+      static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE | DONT_ENUM);
+  for (Interface::Iterator it = module->interface()->iterator();
+       !it.done(); it.Advance()) {
+    if (it.interface()->IsModule()) {
+      Handle<Object> value = it.interface()->Instance();
+      ASSERT(!value.is_null());
+      JSReceiver::SetProperty(instance, it.name(), value, attr, kStrictMode);
+    } else {
+      // TODO(rossberg): set proper getters instead of undefined...
+      // instance->DefineAccessor(*it.name(), ACCESSOR_GETTER, *getter, attr);
+      Handle<Object> value(isolate()->heap()->undefined_value());
+      JSReceiver::SetProperty(instance, it.name(), value, attr, kStrictMode);
+    }
+  }
+  USE(instance->PreventExtensions());
 }
 
 
 void FullCodeGenerator::VisitModuleVariable(ModuleVariable* module) {
-  // TODO(rossberg)
+  // Nothing to do.
+  // The instance object is resolved statically through the module's interface.
 }
 
 
 void FullCodeGenerator::VisitModulePath(ModulePath* module) {
-  // TODO(rossberg)
+  // Nothing to do.
+  // The instance object is resolved statically through the module's interface.
 }
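The rewritten VisitDeclarations above swaps the globals_ member to a stack-allocated list around the recursive visit, so nested declaration lists (module bodies, for instance) each batch their own globals before the outer batch is restored. The same save/swap/restore idiom in miniature, with hypothetical types standing in for the code generator:

    #include <cstdio>
    #include <vector>

    std::vector<int>* globals_ = 0;  // stand-in for the codegen member

    void VisitDeclarations(int depth) {
      std::vector<int>* saved = globals_;  // save the outer batch
      std::vector<int> inner;
      globals_ = &inner;                   // nested visits fill `inner`
      globals_->push_back(depth);          // "declare" one global
      if (depth < 2) VisitDeclarations(depth + 1);
      printf("depth %d batched %d global(s)\n", depth, (int)inner.size());
      globals_ = saved;                    // restore the outer batch
    }

    int main() { VisitDeclarations(0); }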
 
 
@@ -912,9 +915,9 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
 
   Scope* saved_scope = scope();
   // Push a block context when entering a block with block scoped variables.
-  if (stmt->block_scope() != NULL) {
+  if (stmt->scope() != NULL) {
     { Comment cmnt(masm_, "[ Extend block context");
-      scope_ = stmt->block_scope();
+      scope_ = stmt->scope();
       Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
       int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
       __ Push(scope_info);
@@ -941,7 +944,7 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
 
   // Pop block context if necessary.
-  if (stmt->block_scope() != NULL) {
+  if (stmt->scope() != NULL) {
     LoadContextField(context_register(), Context::PREVIOUS_INDEX);
     // Update local stack frame context field.
     StoreToFrameField(StandardFrameConstants::kContextOffset,
index 689abae..0e0ffe9 100644 (file)
@@ -83,7 +83,7 @@ class FullCodeGenerator: public AstVisitor {
         scope_(info->scope()),
         nesting_stack_(NULL),
         loop_depth_(0),
-        global_count_(0),
+        globals_(NULL),
         context_(NULL),
         bailout_entries_(info->HasDeoptimizationSupport()
                          ? info->function()->ast_node_count() : 0),
@@ -202,7 +202,7 @@ class FullCodeGenerator: public AstVisitor {
     virtual ~NestedBlock() {}
 
     virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
-      if (statement()->AsBlock()->block_scope() != NULL) {
+      if (statement()->AsBlock()->scope() != NULL) {
         ++(*context_length);
       }
       return previous_;
@@ -413,12 +413,9 @@ class FullCodeGenerator: public AstVisitor {
                                     Label* if_true,
                                     Label* if_false);
 
-  // Platform-specific code for a variable, constant, or function
-  // declaration.  Functions have an initial value.
-  // Increments global_count_ for unallocated variables.
-  void EmitDeclaration(VariableProxy* proxy,
-                       VariableMode mode,
-                       FunctionLiteral* function);
+  // If enabled, emit debug code for checking that the current context is
+  // neither a with nor a catch context.
+  void EmitDebugCheckDeclarationContext(Variable* variable);
 
   // Platform-specific code for checking the stack limit at the back edge of
   // a loop.
@@ -548,13 +545,8 @@ class FullCodeGenerator: public AstVisitor {
   Handle<Script> script() { return info_->script(); }
   bool is_eval() { return info_->is_eval(); }
   bool is_native() { return info_->is_native(); }
-  bool is_classic_mode() {
-    return language_mode() == CLASSIC_MODE;
-  }
-  LanguageMode language_mode() {
-    return function()->language_mode();
-  }
-  bool is_qml_mode() { return function()->qml_mode(); }
+  bool is_classic_mode() { return language_mode() == CLASSIC_MODE; }
+  LanguageMode language_mode() { return function()->language_mode(); }
   FunctionLiteral* function() { return info_->function(); }
   Scope* scope() { return scope_; }
 
@@ -786,7 +778,7 @@ class FullCodeGenerator: public AstVisitor {
   Label return_label_;
   NestedStatement* nesting_stack_;
   int loop_depth_;
-  int global_count_;
+  ZoneList<Handle<Object> >* globals_;
   const ExpressionContext* context_;
   ZoneList<BailoutEntry> bailout_entries_;
   ZoneList<BailoutEntry> stack_checks_;
index 790c6bf..97b033f 100644 (file)
@@ -345,6 +345,9 @@ F FUNCTION_CAST(Address addr) {
 #define INLINE(header) inline __attribute__((always_inline)) header
 #define NO_INLINE(header) __attribute__((noinline)) header
 #endif
+#elif defined(_MSC_VER) && !defined(DEBUG)
+#define INLINE(header) __forceinline header
+#define NO_INLINE(header) header
 #else
 #define INLINE(header) inline header
 #define NO_INLINE(header) header
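With this branch, MSVC release builds get __forceinline instead of falling through to plain inline. An illustrative expansion (Add is a hypothetical function; this compiles only with MSVC, matching the #elif guard):

    #define INLINE(header) __forceinline header  // MSVC, non-DEBUG branch

    // INLINE(static int Add(int a, int b)) expands to:
    //   __forceinline static int Add(int a, int b)
    INLINE(static int Add(int a, int b)) { return a + b; }

    int main() { return Add(1, 2) == 3 ? 0 : 1; }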
@@ -396,12 +399,6 @@ enum StrictModeFlag {
   kStrictMode
 };
 
-// The QML Compilation Mode
-enum QmlModeFlag {
-  kNonQmlMode,
-  kQmlMode
-};
-
 
 } }  // namespace v8::internal
 
index 416ecbd..def1604 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -729,9 +729,9 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
         Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate);
 
     for (int i = 0; i < descs->number_of_descriptors(); i++) {
-      if (descs->IsProperty(i) && !descs->IsDontEnum(i)) {
+      if (descs->IsProperty(i) && !descs->GetDetails(i).IsDontEnum()) {
         storage->set(index, descs->GetKey(i));
-        PropertyDetails details(descs->GetDetails(i));
+        PropertyDetails details = descs->GetDetails(i);
         sort_array->set(index, Smi::FromInt(details.index()));
         if (!indices.is_null()) {
           if (details.type() != FIELD) {
index 5aeb895..91843b8 100644 (file)
@@ -63,7 +63,9 @@ class TemplateHashMapImpl {
   Entry* Lookup(void* key, uint32_t hash, bool insert);
 
   // Removes the entry with matching key.
-  void Remove(void* key, uint32_t hash);
+  // Returns the value of the deleted entry,
+  // or NULL if there is no entry with the given key.
+  void* Remove(void* key, uint32_t hash);
 
   // Empties the hash map (occupancy() == 0).
   void Clear();
@@ -146,14 +148,15 @@ typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Lookup(
 
 
 template<class P>
-void TemplateHashMapImpl<P>::Remove(void* key, uint32_t hash) {
+void* TemplateHashMapImpl<P>::Remove(void* key, uint32_t hash) {
   // Lookup the entry for the key to remove.
   Entry* p = Probe(key, hash);
   if (p->key == NULL) {
     // Key not found; nothing to remove.
-    return;
+    return NULL;
   }
 
+  void* value = p->value;
   // To remove an entry we need to ensure that it does not create an empty
   // entry that will cause the search for another entry to stop too soon. If all
   // the entries between the entry to remove and the next empty slot have their
@@ -202,6 +205,7 @@ void TemplateHashMapImpl<P>::Remove(void* key, uint32_t hash) {
   // Clear the entry which is allowed to be emptied.
   p->key = NULL;
   occupancy_--;
+  return value;
 }
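All of the shifting bookkeeping in Remove exists because linear probing cannot simply null out a slot: entries probed past the hole would become unreachable. A standalone demonstration of that invariant with a fixed-size integer table (illustrative, not the template code above):

    #include <cstdio>

    const int kCap = 8;   // power of two
    int keys[kCap];       // 0 means empty
    int values[kCap];

    int Home(int key) { return key & (kCap - 1); }

    void Insert(int key, int value) {
      int p = Home(key);
      while (keys[p] != 0) p = (p + 1) & (kCap - 1);
      keys[p] = key;
      values[p] = value;
    }

    // Returns a pointer to the removed value, or 0 if the key is absent.
    int* Remove(int key) {
      int p = Home(key);
      while (keys[p] != key) {
        if (keys[p] == 0) return 0;  // key not found, nothing to remove
        p = (p + 1) & (kCap - 1);
      }
      static int removed;
      removed = values[p];
      int q = p;
      for (;;) {
        q = (q + 1) & (kCap - 1);
        if (keys[q] == 0) break;     // next empty slot ends the cluster
        int r = Home(keys[q]);
        // Move entry q into the hole at p unless its home bucket r lies
        // cyclically within (p, q], i.e. unless a probe starting at r would
        // still reach it once the hole is cleared.
        if ((q > p && (r <= p || r > q)) || (q < p && r <= p && r > q)) {
          keys[p] = keys[q];
          values[p] = values[q];
          p = q;                     // the hole moves forward
        }
      }
      keys[p] = 0;
      return &removed;
    }

    int main() {
      Insert(1, 10);
      Insert(9, 90);  // home bucket 1 as well; stored one slot further
      int* v = Remove(1);
      printf("removed %d; key in slot 1 is now %d\n", v ? *v : -1, keys[1]);
    }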
 
 
index 72acf3d..e12895a 100644 (file)
@@ -127,7 +127,6 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
   String* answer = String::cast(result);
   answer->set_length(str.length());
   answer->set_hash_field(hash_field);
-  SeqString::cast(answer)->set_symbol_id(0);
 
   ASSERT_EQ(size, answer->Size());
 
@@ -161,7 +160,6 @@ MaybeObject* Heap::AllocateTwoByteSymbol(Vector<const uc16> str,
   String* answer = String::cast(result);
   answer->set_length(str.length());
   answer->set_hash_field(hash_field);
-  SeqString::cast(answer)->set_symbol_id(0);
 
   ASSERT_EQ(size, answer->Size());
 
@@ -246,33 +244,18 @@ MaybeObject* Heap::NumberFromUint32(
 }
 
 
-void Heap::FinalizeExternalString(HeapObject* string) {
-  ASSERT(string->IsExternalString() || string->map()->has_external_resource());
-
-  if (string->IsExternalString()) {
-    v8::String::ExternalStringResourceBase** resource_addr =
-        reinterpret_cast<v8::String::ExternalStringResourceBase**>(
-            reinterpret_cast<byte*>(string) +
-            ExternalString::kResourceOffset -
-            kHeapObjectTag);
+void Heap::FinalizeExternalString(String* string) {
+  ASSERT(string->IsExternalString());
+  v8::String::ExternalStringResourceBase** resource_addr =
+      reinterpret_cast<v8::String::ExternalStringResourceBase**>(
+          reinterpret_cast<byte*>(string) +
+          ExternalString::kResourceOffset -
+          kHeapObjectTag);
 
-    // Dispose of the C++ object if it has not already been disposed.
-    if (*resource_addr != NULL) {
-      (*resource_addr)->Dispose();
-      *resource_addr = NULL;
-    }
-  } else {
-    JSObject *object = JSObject::cast(string);
-    Object *value = object->GetExternalResourceObject();
-    v8::Object::ExternalResource *resource = 0;
-    if (value->IsSmi()) {
-        resource = reinterpret_cast<v8::Object::ExternalResource*>(Internals::GetExternalPointerFromSmi(value));
-    } else if (value->IsForeign()) {
-        resource = reinterpret_cast<v8::Object::ExternalResource*>(Foreign::cast(value)->foreign_address());
-    }
-    if (resource) {
-        resource->Dispose();
-    }
+  // Dispose of the C++ object if it has not already been disposed.
+  if (*resource_addr != NULL) {
+    (*resource_addr)->Dispose();
+    *resource_addr = NULL;
   }
 }
 
@@ -477,15 +460,16 @@ MaybeObject* Heap::PrepareForCompare(String* str) {
 }
 
 
-int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
+intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
+    intptr_t change_in_bytes) {
   ASSERT(HasBeenSetUp());
-  int amount = amount_of_external_allocated_memory_ + change_in_bytes;
+  intptr_t amount = amount_of_external_allocated_memory_ + change_in_bytes;
   if (change_in_bytes >= 0) {
     // Avoid overflow.
     if (amount > amount_of_external_allocated_memory_) {
       amount_of_external_allocated_memory_ = amount;
     }
-    int amount_since_last_global_gc =
+    intptr_t amount_since_last_global_gc =
         amount_of_external_allocated_memory_ -
         amount_of_external_allocated_memory_at_last_global_gc_;
     if (amount_since_last_global_gc > external_allocation_limit_) {
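Widening these counters from int to intptr_t matters on 64-bit hosts, where embedders can legitimately register more than 2 GB of external memory: the running total overflows a 32-bit int long before any single registration does. A minimal demonstration (int64_t plays the role of intptr_t on a 64-bit host):

    #include <stdint.h>
    #include <cstdio>

    int main() {
      int32_t chunk = 1500 * 1024 * 1024;  // ~1.5 GB, still fits in int32
      // chunk + chunk in 32 bits would overflow (signed overflow is UB);
      // the widened counter accumulates it safely.
      int64_t total = static_cast<int64_t>(chunk) + chunk;
      printf("%lld bytes tracked\n", static_cast<long long>(total));
    }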
@@ -594,16 +578,6 @@ void ExternalStringTable::AddString(String* string) {
 }
 
 
-void ExternalStringTable::AddObject(HeapObject* object) {
-  ASSERT(object->map()->has_external_resource());
-  if (heap_->InNewSpace(object)) {
-    new_space_strings_.Add(object);
-  } else {
-    old_space_strings_.Add(object);
-  }
-}
-
-
 void ExternalStringTable::Iterate(ObjectVisitor* v) {
   if (!new_space_strings_.is_empty()) {
     Object** start = &new_space_strings_[0];
@@ -632,14 +606,14 @@ void ExternalStringTable::Verify() {
 }
 
 
-void ExternalStringTable::AddOldObject(HeapObject* object) {
-  ASSERT(object->IsExternalString() || object->map()->has_external_resource());
-  ASSERT(!heap_->InNewSpace(object));
-  old_space_strings_.Add(object);
+void ExternalStringTable::AddOldString(String* string) {
+  ASSERT(string->IsExternalString());
+  ASSERT(!heap_->InNewSpace(string));
+  old_space_strings_.Add(string);
 }
 
 
-void ExternalStringTable::ShrinkNewObjects(int position) {
+void ExternalStringTable::ShrinkNewStrings(int position) {
   new_space_strings_.Rewind(position);
   if (FLAG_verify_heap) {
     Verify();
index 8be6f27..2e971a5 100644 (file)
@@ -33,7 +33,6 @@
 namespace v8 {
 namespace internal {
 
-
 HeapProfiler::HeapProfiler()
     : snapshots_(new HeapSnapshotsCollection()),
       next_snapshot_uid_(1) {
@@ -86,6 +85,24 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(String* name,
 }
 
 
+void HeapProfiler::StartHeapObjectsTracking() {
+  ASSERT(Isolate::Current()->heap_profiler() != NULL);
+  Isolate::Current()->heap_profiler()->StartHeapObjectsTrackingImpl();
+}
+
+
+void HeapProfiler::StopHeapObjectsTracking() {
+  ASSERT(Isolate::Current()->heap_profiler() != NULL);
+  Isolate::Current()->heap_profiler()->StopHeapObjectsTrackingImpl();
+}
+
+
+void HeapProfiler::PushHeapObjectsStats(v8::OutputStream* stream) {
+  ASSERT(Isolate::Current()->heap_profiler() != NULL);
+  return Isolate::Current()->heap_profiler()->PushHeapObjectsStatsImpl(stream);
+}
+
+
 void HeapProfiler::DefineWrapperClass(
     uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback) {
   ASSERT(class_id != v8::HeapProfiler::kPersistentHandleNoClassId);
@@ -136,6 +153,20 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name,
   return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control);
 }
 
+void HeapProfiler::StartHeapObjectsTrackingImpl() {
+  snapshots_->StartHeapObjectsTracking();
+}
+
+
+void HeapProfiler::PushHeapObjectsStatsImpl(OutputStream* stream) {
+  snapshots_->PushHeapObjectsStats(stream);
+}
+
+
+void HeapProfiler::StopHeapObjectsTrackingImpl() {
+  snapshots_->StopHeapObjectsTracking();
+}
+
 
 int HeapProfiler::GetSnapshotsCount() {
   HeapProfiler* profiler = Isolate::Current()->heap_profiler();
@@ -158,6 +189,15 @@ HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
 }
 
 
+SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
+  if (!obj->IsHeapObject())
+    return v8::HeapProfiler::kUnknownObjectId;
+  HeapProfiler* profiler = Isolate::Current()->heap_profiler();
+  ASSERT(profiler != NULL);
+  return profiler->snapshots_->FindObjectId(HeapObject::cast(*obj)->address());
+}
+
+
 void HeapProfiler::DeleteAllSnapshots() {
   HeapProfiler* profiler = Isolate::Current()->heap_profiler();
   ASSERT(profiler != NULL);
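GetSnapshotObjectId implies the snapshot collection maintains a stable id per heap object that survives GC moves, keyed by the object's current address. A conceptual sketch of such a mapping (types and names here are illustrative, not the internal HeapObjectsMap API):

    #include <stdint.h>
    #include <cstdio>
    #include <map>

    typedef uint32_t SnapshotObjectId;
    std::map<uintptr_t, SnapshotObjectId> ids;  // address -> stable id
    SnapshotObjectId next_id = 1;

    SnapshotObjectId FindOrAddId(uintptr_t addr) {
      std::map<uintptr_t, SnapshotObjectId>::iterator it = ids.find(addr);
      if (it != ids.end()) return it->second;
      return ids[addr] = next_id++;
    }

    // A GC move re-keys the entry but keeps the id, so snapshots taken at
    // different times can be diffed object-by-object.
    void ObjectMoved(uintptr_t from, uintptr_t to) {
      ids[to] = FindOrAddId(from);
      ids.erase(from);
    }

    int main() {
      SnapshotObjectId id = FindOrAddId(0x1000);
      ObjectMoved(0x1000, 0x2000);
      printf("stable: %d\n", FindOrAddId(0x2000) == id);  // prints 1
    }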
index ef5c4f4..96b042d 100644 (file)
@@ -44,8 +44,6 @@ class HeapSnapshotsCollection;
     }                                                                        \
   } while (false)
 
-// The HeapProfiler writes data to the log files, which can be postprocessed
-// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
 class HeapProfiler {
  public:
   static void SetUp();
@@ -57,9 +55,14 @@ class HeapProfiler {
   static HeapSnapshot* TakeSnapshot(String* name,
                                     int type,
                                     v8::ActivityControl* control);
+
+  static void StartHeapObjectsTracking();
+  static void StopHeapObjectsTracking();
+  static void PushHeapObjectsStats(OutputStream* stream);
   static int GetSnapshotsCount();
   static HeapSnapshot* GetSnapshot(int index);
   static HeapSnapshot* FindSnapshot(unsigned uid);
+  static SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
   static void DeleteAllSnapshots();
 
   void ObjectMoveEvent(Address from, Address to);
@@ -84,6 +87,10 @@ class HeapProfiler {
                                  v8::ActivityControl* control);
   void ResetSnapshots();
 
+  void StartHeapObjectsTrackingImpl();
+  void StopHeapObjectsTrackingImpl();
+  void PushHeapObjectsStatsImpl(OutputStream* stream);
+
   HeapSnapshotsCollection* snapshots_;
   unsigned next_snapshot_uid_;
   List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
index 797bb15..d3c7f0a 100644 (file)
@@ -171,6 +171,9 @@ Heap::Heap()
   global_contexts_list_ = NULL;
   mark_compact_collector_.heap_ = this;
   external_string_table_.heap_ = this;
+  // Put a dummy entry in the remembered pages so we can find the list in
+  // the minidump even if there are no real unmapped pages.
+  RememberUnmappedPage(NULL, false);
 }
 
 
@@ -238,12 +241,17 @@ int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                               const char** reason) {
   // Is global GC requested?
-  if (space != NEW_SPACE || FLAG_gc_global) {
+  if (space != NEW_SPACE) {
     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
     *reason = "GC in old space requested";
     return MARK_COMPACTOR;
   }
 
+  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
+    *reason = "GC in old space forced by flags";
+    return MARK_COMPACTOR;
+  }
+
   // Is enough data promoted to justify a global GC?
   if (OldGenerationPromotionLimitReached()) {
     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
@@ -800,7 +808,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
 
     UpdateSurvivalRateTrend(start_new_space_size);
 
-    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
+    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
 
     if (high_survival_rate_during_scavenges &&
         IsStableOrIncreasingSurvivalTrend()) {
@@ -1124,6 +1132,27 @@ void PromotionQueue::RelocateQueueHead() {
 }
 
 
+class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
+
+  virtual Object* RetainAs(Object* object) {
+    if (!heap_->InFromSpace(object)) {
+      return object;
+    }
+
+    MapWord map_word = HeapObject::cast(object)->map_word();
+    if (map_word.IsForwardingAddress()) {
+      return map_word.ToForwardingAddress();
+    }
+    return NULL;
+  }
+
+ private:
+  Heap* heap_;
+};
+
+
 void Heap::Scavenge() {
 #ifdef DEBUG
   if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
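The retainer above gives weak lists three outcomes during a scavenge: objects outside from-space are kept as-is, evacuated objects are followed through their forwarding address, and anything left unforwarded is dead and dropped from the list. The same decision in minimal form (a hypothetical Obj type stands in for HeapObject):

    #include <cstdio>

    struct Obj { Obj* forwarding; };  // non-null once the scavenger copied it

    Obj* RetainAs(Obj* o, bool in_from_space) {
      if (!in_from_space) return o;                  // not subject to this GC
      if (o->forwarding != 0) return o->forwarding;  // survived; new address
      return 0;                                      // dead; unlink from list
    }

    int main() {
      Obj moved = { 0 };
      Obj original = { &moved };
      printf("%s\n", RetainAs(&original, true) == &moved ? "followed" : "?");
    }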
@@ -1222,6 +1251,9 @@ void Heap::Scavenge() {
   }
   incremental_marking()->UpdateMarkingDequeAfterScavenge();
 
+  ScavengeWeakObjectRetainer weak_object_retainer(this);
+  ProcessWeakReferences(&weak_object_retainer);
+
   ASSERT(new_space_front == new_space_.top());
 
   // Set age mark.
@@ -1242,18 +1274,18 @@ void Heap::Scavenge() {
 }
 
 
-HeapObject* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
-                                                                    Object** p) {
+String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
+                                                                Object** p) {
   MapWord first_word = HeapObject::cast(*p)->map_word();
 
   if (!first_word.IsForwardingAddress()) {
     // Unreachable external string can be finalized.
-    heap->FinalizeExternalString(HeapObject::cast(*p));
+    heap->FinalizeExternalString(String::cast(*p));
     return NULL;
   }
 
   // String is still reachable.
-  return HeapObject::cast(first_word.ToForwardingAddress());
+  return String::cast(first_word.ToForwardingAddress());
 }
 
 
@@ -1271,11 +1303,11 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
 
   for (Object** p = start; p < end; ++p) {
     ASSERT(InFromSpace(*p));
-    HeapObject* target = updater_func(this, p);
+    String* target = updater_func(this, p);
 
     if (target == NULL) continue;
 
-    ASSERT(target->IsExternalString() || target->map()->has_external_resource());
+    ASSERT(target->IsExternalString());
 
     if (InNewSpace(target)) {
       // String is still in new space.  Update the table entry.
@@ -1283,12 +1315,12 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
       ++last;
     } else {
       // String got promoted.  Move it to the old string list.
-      external_string_table_.AddOldObject(target);
+      external_string_table_.AddOldString(target);
     }
   }
 
   ASSERT(last <= end);
-  external_string_table_.ShrinkNewObjects(static_cast<int>(last - start));
+  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
 }
 
 
@@ -1308,7 +1340,8 @@ void Heap::UpdateReferencesInExternalStringTable(
 
 static Object* ProcessFunctionWeakReferences(Heap* heap,
                                              Object* function,
-                                             WeakObjectRetainer* retainer) {
+                                             WeakObjectRetainer* retainer,
+                                             bool record_slots) {
   Object* undefined = heap->undefined_value();
   Object* head = undefined;
   JSFunction* tail = NULL;
@@ -1325,6 +1358,12 @@ static Object* ProcessFunctionWeakReferences(Heap* heap,
         // Subsequent elements in the list.
         ASSERT(tail != NULL);
         tail->set_next_function_link(retain);
+        if (record_slots) {
+          Object** next_function =
+              HeapObject::RawField(tail, JSFunction::kNextFunctionLinkOffset);
+          heap->mark_compact_collector()->RecordSlot(
+              next_function, next_function, retain);
+        }
       }
       // Retained function is new tail.
       candidate_function = reinterpret_cast<JSFunction*>(retain);
@@ -1353,6 +1392,15 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
   Object* head = undefined;
   Context* tail = NULL;
   Object* candidate = global_contexts_list_;
+
+  // We don't record weak slots during marking or scavenges.
+  // Instead we do it once when we complete a mark-compact cycle.
+  // Note that the write barrier has no effect if we are already in the middle
+  // of a compacting mark-sweep cycle, so we have to record slots manually.
+  bool record_slots =
+      gc_state() == MARK_COMPACT &&
+      mark_compact_collector()->is_compacting();
+
   while (candidate != undefined) {
     // Check whether to keep the candidate in the list.
     Context* candidate_context = reinterpret_cast<Context*>(candidate);
@@ -1368,6 +1416,14 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
                             Context::NEXT_CONTEXT_LINK,
                             retain,
                             UPDATE_WRITE_BARRIER);
+
+        if (record_slots) {
+          Object** next_context =
+              HeapObject::RawField(
+                  tail, FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK));
+          mark_compact_collector()->RecordSlot(
+              next_context, next_context, retain);
+        }
       }
       // Retained context is new tail.
       candidate_context = reinterpret_cast<Context*>(retain);
@@ -1380,11 +1436,19 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
           ProcessFunctionWeakReferences(
               this,
               candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
-              retainer);
+              retainer,
+              record_slots);
       candidate_context->set_unchecked(this,
                                        Context::OPTIMIZED_FUNCTIONS_LIST,
                                        function_list_head,
                                        UPDATE_WRITE_BARRIER);
+      if (record_slots) {
+        Object** optimized_functions =
+            HeapObject::RawField(
+                tail, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
+        mark_compact_collector()->RecordSlot(
+            optimized_functions, optimized_functions, function_list_head);
+      }
     }
 
     // Move to next element in the list.
@@ -1484,6 +1548,27 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
 }
 
 
+STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
+
+
+INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
+                                              HeapObject* object,
+                                              int size));
+
+static HeapObject* EnsureDoubleAligned(Heap* heap,
+                                       HeapObject* object,
+                                       int size) {
+  if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
+    heap->CreateFillerObjectAt(object->address(), kPointerSize);
+    return HeapObject::FromAddress(object->address() + kPointerSize);
+  } else {
+    heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
+                               kPointerSize);
+    return object;
+  }
+}
+
+
 enum LoggingAndProfiling {
   LOGGING_AND_PROFILING_ENABLED,
   LOGGING_AND_PROFILING_DISABLED
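EnsureDoubleAligned assumes the allocator handed back object_size plus one spare pointer-sized word (the allocation_size bump in the next hunk) and turns that word into a filler on whichever side makes the object start 8-byte aligned. The address arithmetic in isolation, assuming a 32-bit target where kPointerSize is 4 and heap addresses are at least 4-aligned:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    const uintptr_t kDoubleAlignmentMask = 8 - 1;
    const size_t kPointerSize = 4;  // 32-bit target assumed

    // raw points at size + kPointerSize bytes; return an 8-aligned start.
    // On a 4-aligned heap the misalignment is exactly one pointer word, so
    // skipping it fixes the alignment; otherwise the spare word trails.
    char* EnsureDoubleAligned(char* raw, size_t size) {
      if (reinterpret_cast<uintptr_t>(raw) & kDoubleAlignmentMask) {
        return raw + kPointerSize;  // filler word at the front
      }
      return raw;                   // filler word at raw + size
    }

    int main() {
      char* block = static_cast<char*>(malloc(64 + kPointerSize));
      char* object = EnsureDoubleAligned(block, 64);
      printf("aligned: %d\n",
             (int)((reinterpret_cast<uintptr_t>(object) & kDoubleAlignmentMask) == 0));
      free(block);
    }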
@@ -1607,7 +1692,10 @@ class ScavengingVisitor : public StaticVisitorBase {
     }
   }
 
-  template<ObjectContents object_contents, SizeRestriction size_restriction>
+
+  template<ObjectContents object_contents,
+           SizeRestriction size_restriction,
+           int alignment>
   static inline void EvacuateObject(Map* map,
                                     HeapObject** slot,
                                     HeapObject* object,
@@ -1616,19 +1704,26 @@ class ScavengingVisitor : public StaticVisitorBase {
                 (object_size <= Page::kMaxNonCodeHeapObjectSize));
     SLOW_ASSERT(object->Size() == object_size);
 
+    int allocation_size = object_size;
+    if (alignment != kObjectAlignment) {
+      ASSERT(alignment == kDoubleAlignment);
+      allocation_size += kPointerSize;
+    }
+
     Heap* heap = map->GetHeap();
     if (heap->ShouldBePromoted(object->address(), object_size)) {
       MaybeObject* maybe_result;
 
       if ((size_restriction != SMALL) &&
-          (object_size > Page::kMaxNonCodeHeapObjectSize)) {
-        maybe_result = heap->lo_space()->AllocateRaw(object_size,
+          (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
+        maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
                                                      NOT_EXECUTABLE);
       } else {
         if (object_contents == DATA_OBJECT) {
-          maybe_result = heap->old_data_space()->AllocateRaw(object_size);
+          maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
         } else {
-          maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
+          maybe_result =
+              heap->old_pointer_space()->AllocateRaw(allocation_size);
         }
       }
 
@@ -1636,6 +1731,10 @@ class ScavengingVisitor : public StaticVisitorBase {
       if (maybe_result->ToObject(&result)) {
         HeapObject* target = HeapObject::cast(result);
 
+        if (alignment != kObjectAlignment) {
+          target = EnsureDoubleAligned(heap, target, allocation_size);
+        }
+
         // Order is important: slot might be inside of the target if target
         // was allocated over a dead object and slot comes from the store
         // buffer.
@@ -1643,18 +1742,27 @@ class ScavengingVisitor : public StaticVisitorBase {
         MigrateObject(heap, object, target, object_size);
 
         if (object_contents == POINTER_OBJECT) {
-          heap->promotion_queue()->insert(target, object_size);
+          if (map->instance_type() == JS_FUNCTION_TYPE) {
+            heap->promotion_queue()->insert(
+                target, JSFunction::kNonWeakFieldsEndOffset);
+          } else {
+            heap->promotion_queue()->insert(target, object_size);
+          }
         }
 
         heap->tracer()->increment_promoted_objects_size(object_size);
         return;
       }
     }
-    MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
+    MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
     heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
     Object* result = allocation->ToObjectUnchecked();
     HeapObject* target = HeapObject::cast(result);
 
+    if (alignment != kObjectAlignment) {
+      target = EnsureDoubleAligned(heap, target, allocation_size);
+    }
+
     // Order is important: slot might be inside of the target if target
     // was allocated over a dead object and slot comes from the store
     // buffer.
@@ -1690,7 +1798,7 @@ class ScavengingVisitor : public StaticVisitorBase {
                                         HeapObject** slot,
                                         HeapObject* object) {
     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
-    EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
+    EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
                                                  slot,
                                                  object,
                                                  object_size);
@@ -1702,10 +1810,11 @@ class ScavengingVisitor : public StaticVisitorBase {
                                               HeapObject* object) {
     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
     int object_size = FixedDoubleArray::SizeFor(length);
-    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map,
-                                              slot,
-                                              object,
-                                              object_size);
+    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
+        map,
+        slot,
+        object,
+        object_size);
   }
 
 
@@ -1713,7 +1822,8 @@ class ScavengingVisitor : public StaticVisitorBase {
                                        HeapObject** slot,
                                        HeapObject* object) {
     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
-    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
+        map, slot, object, object_size);
   }
 
 
@@ -1722,7 +1832,8 @@ class ScavengingVisitor : public StaticVisitorBase {
                                             HeapObject* object) {
     int object_size = SeqAsciiString::cast(object)->
         SeqAsciiStringSize(map->instance_type());
-    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
+        map, slot, object, object_size);
   }
 
 
@@ -1731,7 +1842,8 @@ class ScavengingVisitor : public StaticVisitorBase {
                                               HeapObject* object) {
     int object_size = SeqTwoByteString::cast(object)->
         SeqTwoByteStringSize(map->instance_type());
-    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
+    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
+        map, slot, object, object_size);
   }
 
 
@@ -1774,7 +1886,8 @@ class ScavengingVisitor : public StaticVisitorBase {
     }
 
     int object_size = ConsString::kSize;
-    EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
+    EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
+        map, slot, object, object_size);
   }
 
   template<ObjectContents object_contents>
@@ -1784,14 +1897,16 @@ class ScavengingVisitor : public StaticVisitorBase {
     static inline void VisitSpecialized(Map* map,
                                         HeapObject** slot,
                                         HeapObject* object) {
-      EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
+      EvacuateObject<object_contents, SMALL, kObjectAlignment>(
+          map, slot, object, object_size);
     }
 
     static inline void Visit(Map* map,
                              HeapObject** slot,
                              HeapObject* object) {
       int object_size = map->instance_size();
-      EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
+      EvacuateObject<object_contents, SMALL, kObjectAlignment>(
+          map, slot, object, object_size);
     }
   };
 
@@ -1908,7 +2023,7 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
   map->set_pre_allocated_property_fields(0);
   map->init_instance_descriptors();
   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
-  map->set_prototype_transitions(empty_fixed_array(), SKIP_WRITE_BARRIER);
+  map->init_prototype_transitions(undefined_value());
   map->set_unused_property_fields(0);
   map->set_bit_field(0);
   map->set_bit_field2(1 << Map::kIsExtensible);
@@ -2047,15 +2162,15 @@ bool Heap::CreateInitialMaps() {
   // Fix the instance_descriptors for the existing maps.
   meta_map()->init_instance_descriptors();
   meta_map()->set_code_cache(empty_fixed_array());
-  meta_map()->set_prototype_transitions(empty_fixed_array());
+  meta_map()->init_prototype_transitions(undefined_value());
 
   fixed_array_map()->init_instance_descriptors();
   fixed_array_map()->set_code_cache(empty_fixed_array());
-  fixed_array_map()->set_prototype_transitions(empty_fixed_array());
+  fixed_array_map()->init_prototype_transitions(undefined_value());
 
   oddball_map()->init_instance_descriptors();
   oddball_map()->set_code_cache(empty_fixed_array());
-  oddball_map()->set_prototype_transitions(empty_fixed_array());
+  oddball_map()->init_prototype_transitions(undefined_value());
 
   // Fix prototype object for existing maps.
   meta_map()->set_prototype(null_value());
@@ -3827,6 +3942,16 @@ MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
 }
 
 
+MaybeObject* Heap::AllocateJSModule() {
+  // Allocate a fresh map. Modules do not have a prototype.
+  Map* map;
+  MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
+  if (!maybe_map->To(&map)) return maybe_map;
+  // Allocate the object based on the map.
+  return AllocateJSObjectFromMap(map, TENURED);
+}
+
+
 MaybeObject* Heap::AllocateJSArrayAndStorage(
     ElementsKind elements_kind,
     int length,
@@ -3963,7 +4088,7 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
   // Fill these accessors into the dictionary.
   DescriptorArray* descs = map->instance_descriptors();
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
-    PropertyDetails details(descs->GetDetails(i));
+    PropertyDetails details = descs->GetDetails(i);
     ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
     PropertyDetails d =
         PropertyDetails(details.attributes(), CALLBACKS, details.index());
@@ -4314,7 +4439,6 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
   String* answer = String::cast(result);
   answer->set_length(chars);
   answer->set_hash_field(hash_field);
-  SeqString::cast(answer)->set_symbol_id(0);
 
   ASSERT_EQ(size, answer->Size());
 
@@ -4365,7 +4489,6 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
   HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
   String::cast(result)->set_length(length);
   String::cast(result)->set_hash_field(String::kEmptyHashField);
-  SeqString::cast(result)->set_symbol_id(0);
   ASSERT_EQ(size, HeapObject::cast(result)->Size());
   return result;
 }
@@ -4402,7 +4525,6 @@ MaybeObject* Heap::AllocateRawTwoByteString(int length,
   HeapObject::cast(result)->set_map_no_write_barrier(string_map());
   String::cast(result)->set_length(length);
   String::cast(result)->set_hash_field(String::kEmptyHashField);
-  SeqString::cast(result)->set_symbol_id(0);
   ASSERT_EQ(size, HeapObject::cast(result)->Size());
   return result;
 }
@@ -4659,6 +4781,11 @@ MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
   AllocationSpace space =
       (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   int size = FixedDoubleArray::SizeFor(length);
+
+#ifndef V8_HOST_ARCH_64_BIT
+  size += kPointerSize;
+#endif
+
   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
     // Too big for new space.
     space = LO_SPACE;
@@ -4671,7 +4798,12 @@ MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
   AllocationSpace retry_space =
       (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
 
-  return AllocateRaw(size, space, retry_space);
+  HeapObject* object;
+  { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
+    if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
+  }
+
+  return EnsureDoubleAligned(this, object, size);
 }
 
 
@@ -4704,6 +4836,22 @@ MaybeObject* Heap::AllocateGlobalContext() {
 }
 
 
+MaybeObject* Heap::AllocateModuleContext(Context* previous,
+                                         ScopeInfo* scope_info) {
+  Object* result;
+  { MaybeObject* maybe_result =
+        AllocateFixedArrayWithHoles(scope_info->ContextLength(), TENURED);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Context* context = reinterpret_cast<Context*>(result);
+  context->set_map_no_write_barrier(module_context_map());
+  context->set_previous(previous);
+  context->set_extension(scope_info);
+  context->set_global(previous->global());
+  return context;
+}
+
+
 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
   ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
   Object* result;
@@ -4716,7 +4864,6 @@ MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
   context->set_previous(function->context());
   context->set_extension(NULL);
   context->set_global(function->context()->global());
-  context->set_qml_global(function->context()->qml_global());
   return context;
 }
 
@@ -4737,7 +4884,6 @@ MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
   context->set_previous(previous);
   context->set_extension(name);
   context->set_global(previous->global());
-  context->set_qml_global(previous->qml_global());
   context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
   return context;
 }
@@ -4756,7 +4902,6 @@ MaybeObject* Heap::AllocateWithContext(JSFunction* function,
   context->set_previous(previous);
   context->set_extension(extension);
   context->set_global(previous->global());
-  context->set_qml_global(previous->qml_global());
   return context;
 }
 
@@ -4775,7 +4920,6 @@ MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
   context->set_previous(previous);
   context->set_extension(scope_info);
   context->set_global(previous->global());
-  context->set_qml_global(previous->qml_global());
   return context;
 }
 
@@ -4850,8 +4994,10 @@ void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
 
 bool Heap::IdleNotification(int hint) {
   const int kMaxHint = 1000;
-  intptr_t size_factor = Min(Max(hint, 30), kMaxHint) / 10;
-  // The size factor is in range [3..100].
+  intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
+  // The size factor is in range [5..250]. The numbers here are chosen from
+  // experiments. If you change them, make sure to test with
+  // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
   intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
 
   if (contexts_disposed_ > 0) {
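The new [5..250] range follows directly from the clamp Min(Max(hint, 20), 1000) / 4. A quick check of the old and new formulas side by side:

    #include <algorithm>
    #include <cstdio>

    int main() {
      const int kMaxHint = 1000;
      int hints[] = { 0, 20, 100, 1000 };
      for (int i = 0; i < 4; ++i) {
        int h = hints[i];
        int old_factor = std::min(std::max(h, 30), kMaxHint) / 10;  // [3..100]
        int new_factor = std::min(std::max(h, 20), kMaxHint) / 4;   // [5..250]
        printf("hint=%4d  old=%3d  new=%3d\n", h, old_factor, new_factor);
      }
    }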
@@ -4875,11 +5021,14 @@ bool Heap::IdleNotification(int hint) {
     // Take into account that we might have decided to delay full collection
     // because incremental marking is in progress.
     ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
+    // After context disposal there is likely a lot of garbage remaining; reset
+    // the idle notification counters in order to trigger more incremental GCs
+    // on subsequent idle notifications.
+    StartIdleRound();
     return false;
   }
 
-  if (hint >= kMaxHint || !FLAG_incremental_marking ||
-      FLAG_expose_gc || Serializer::enabled()) {
+  if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
     return IdleGlobalGC();
   }
 
@@ -4918,10 +5067,6 @@ bool Heap::IdleNotification(int hint) {
   }
 
   if (incremental_marking()->IsStopped()) {
-    if (!WorthStartingGCWhenIdle()) {
-      FinishIdleRound();
-      return true;
-    }
     incremental_marking()->Start();
   }
 
@@ -5559,6 +5704,11 @@ bool Heap::ConfigureHeap(int max_semispace_size,
                          intptr_t max_executable_size) {
   if (HasBeenSetUp()) return false;
 
+  if (FLAG_stress_compaction) {
+    // This will cause more frequent GCs when stressing.
+    max_semispace_size_ = Page::kPageSize;
+  }
+
   if (max_semispace_size > 0) {
     if (max_semispace_size < Page::kPageSize) {
       max_semispace_size = Page::kPageSize;
@@ -5663,16 +5813,6 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
 }
 
 
-intptr_t Heap::PromotedSpaceSize() {
-  return old_pointer_space_->Size()
-      + old_data_space_->Size()
-      + code_space_->Size()
-      + map_space_->Size()
-      + cell_space_->Size()
-      + lo_space_->Size();
-}
-
-
 intptr_t Heap::PromotedSpaceSizeOfObjects() {
   return old_pointer_space_->SizeOfObjects()
       + old_data_space_->SizeOfObjects()
@@ -5683,7 +5823,7 @@ intptr_t Heap::PromotedSpaceSizeOfObjects() {
 }
 
 
-int Heap::PromotedExternalMemorySize() {
+intptr_t Heap::PromotedExternalMemorySize() {
   if (amount_of_external_allocated_memory_
       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
   return amount_of_external_allocated_memory_
@@ -5995,6 +6135,11 @@ void Heap::SetStackLimits() {
 
 
 void Heap::TearDown() {
+#ifdef DEBUG
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+#endif
   if (FLAG_print_cumulative_gc_stat) {
     PrintF("\n\n");
     PrintF("gc_count=%d ", gc_count_);
@@ -6920,19 +7065,6 @@ void ExternalStringTable::CleanUp() {
 
 
 void ExternalStringTable::TearDown() {
-  for (int i = 0; i < new_space_strings_.length(); ++i) {
-    if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
-    HeapObject *object = HeapObject::cast(new_space_strings_[i]);
-    if (!object->IsExternalString())
-        heap_->FinalizeExternalString(object);
-  }
-  for (int i = 0; i < old_space_strings_.length(); ++i) {
-    if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
-    HeapObject *object = HeapObject::cast(old_space_strings_[i]);
-    if (!object->IsExternalString())
-        heap_->FinalizeExternalString(object);
-  }
-
   new_space_strings_.Free();
   old_space_strings_.Free();
 }
index 68fcc4a..beb1bc5 100644 (file)
@@ -197,7 +197,6 @@ namespace internal {
   V(string_symbol, "string")                                             \
   V(String_symbol, "String")                                             \
   V(Date_symbol, "Date")                                                 \
-  V(Error_symbol, "Error")                                               \
   V(this_symbol, "this")                                                 \
   V(to_string_symbol, "toString")                                        \
   V(char_at_symbol, "CharAt")                                            \
@@ -244,7 +243,8 @@ namespace internal {
   V(compare_ic_symbol, ".compare_ic")                                    \
   V(infinity_symbol, "Infinity")                                         \
   V(minus_infinity_symbol, "-Infinity")                                  \
-  V(hidden_stack_trace_symbol, "v8::hidden_stack_trace")
+  V(hidden_stack_trace_symbol, "v8::hidden_stack_trace")                 \
+  V(query_colon_symbol, "(?:)")
 
 // Forward declarations.
 class GCTracer;
@@ -253,8 +253,8 @@ class Isolate;
 class WeakObjectRetainer;
 
 
-typedef HeapObject* (*ExternalStringTableUpdaterCallback)(Heap* heap,
-                                                          Object** pointer);
+typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
+                                                      Object** pointer);
 
 class StoreBufferRebuilder {
  public:
@@ -390,14 +390,10 @@ typedef void (*ScavengingCallback)(Map* map,
 // External strings table is a place where all external strings are
 // registered.  We need to keep track of such strings to properly
 // finalize them.
-// The ExternalStringTable can contain both strings and objects with
-// external resources.  It was not renamed to make the patch simpler.
 class ExternalStringTable {
  public:
   // Registers an external string.
   inline void AddString(String* string);
-  // Registers an external object.
-  inline void AddObject(HeapObject* string);
 
   inline void Iterate(ObjectVisitor* v);
 
@@ -415,10 +411,10 @@ class ExternalStringTable {
 
   inline void Verify();
 
-  inline void AddOldObject(HeapObject* string);
+  inline void AddOldString(String* string);
 
   // Notifies the table that only a prefix of the new list is valid.
-  inline void ShrinkNewObjects(int position);
+  inline void ShrinkNewStrings(int position);
 
   // To speed up scavenge collections, new space strings are kept
   // separate from old space strings.
@@ -534,6 +530,8 @@ class Heap {
   MUST_USE_RESULT MaybeObject* AllocateJSObject(
       JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
 
+  MUST_USE_RESULT MaybeObject* AllocateJSModule();
+
   // Allocate a JSArray with no elements
   MUST_USE_RESULT MaybeObject* AllocateEmptyJSArray(
       ElementsKind elements_kind,
@@ -825,6 +823,10 @@ class Heap {
   // Allocate a global (but otherwise uninitialized) context.
   MUST_USE_RESULT MaybeObject* AllocateGlobalContext();
 
+  // Allocate a module context.
+  MUST_USE_RESULT MaybeObject* AllocateModuleContext(Context* previous,
+                                                     ScopeInfo* scope_info);
+
   // Allocate a function context.
   MUST_USE_RESULT MaybeObject* AllocateFunctionContext(int length,
                                                        JSFunction* function);
@@ -958,7 +960,7 @@ class Heap {
 
   // Finalizes an external string by deleting the associated external
   // data and clearing the resource pointer.
-  inline void FinalizeExternalString(HeapObject* string);
+  inline void FinalizeExternalString(String* string);
 
   // Allocates an uninitialized object.  The memory is non-executable if the
   // hardware and OS allow.
@@ -1331,7 +1333,8 @@ class Heap {
 
   // Adjusts the amount of registered external memory.
   // Returns the adjusted value.
-  inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
+  inline intptr_t AdjustAmountOfExternalAllocatedMemory(
+      intptr_t change_in_bytes);
 
   // Allocate uninitialized fixed array.
   MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length);
@@ -1339,7 +1342,7 @@ class Heap {
                                                      PretenureFlag pretenure);
 
   inline intptr_t PromotedTotalSize() {
-    return PromotedSpaceSize() + PromotedExternalMemorySize();
+    return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
   }
 
   // True if we have reached the allocation limit in the old generation that
@@ -1360,19 +1363,6 @@ class Heap {
   static const intptr_t kMinimumAllocationLimit =
       8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
 
-  // When we sweep lazily we initially guess that there is no garbage on the
-  // heap and set the limits for the next GC accordingly.  As we sweep we find
-  // out that some of the pages contained garbage and we have to adjust
-  // downwards the size of the heap.  This means the limits that control the
-  // timing of the next GC also need to be adjusted downwards.
-  void LowerOldGenLimits(intptr_t adjustment) {
-    size_of_old_gen_at_last_old_space_gc_ -= adjustment;
-    old_gen_promotion_limit_ =
-        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
-    old_gen_allocation_limit_ =
-        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
-  }
-
   intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
     const int divisor = FLAG_stress_compaction ? 10 : 3;
     intptr_t limit =
@@ -1416,6 +1406,12 @@ class Heap {
     kRootListLength
   };
 
+  STATIC_CHECK(kUndefinedValueRootIndex == Internals::kUndefinedValueRootIndex);
+  STATIC_CHECK(kNullValueRootIndex == Internals::kNullValueRootIndex);
+  STATIC_CHECK(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
+  STATIC_CHECK(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
+  STATIC_CHECK(kempty_symbolRootIndex == Internals::kEmptySymbolRootIndex);
+
   MUST_USE_RESULT MaybeObject* NumberToString(
       Object* number, bool check_number_string_cache = true);
   MUST_USE_RESULT MaybeObject* Uint32ToString(
@@ -1447,6 +1443,8 @@ class Heap {
   inline bool NextGCIsLikelyToBeFull() {
     if (FLAG_gc_global) return true;
 
+    if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
+
     intptr_t total_promoted = PromotedTotalSize();
 
     intptr_t adjusted_promotion_limit =
@@ -1457,7 +1455,7 @@ class Heap {
     intptr_t adjusted_allocation_limit =
         old_gen_allocation_limit_ - new_space_.Capacity() / 5;
 
-    if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
+    if (PromotedSpaceSizeOfObjects() >= adjusted_allocation_limit) return true;
 
     return false;
   }
@@ -1495,7 +1493,6 @@ class Heap {
   GCTracer* tracer() { return tracer_; }
 
   // Returns the size of objects residing in non new spaces.
-  intptr_t PromotedSpaceSize();
   intptr_t PromotedSpaceSizeOfObjects();
 
   double total_regexp_code_generated() { return total_regexp_code_generated_; }
@@ -1610,6 +1607,8 @@ class Heap {
   // more expedient to get at the isolate directly from within Heap methods.
   Isolate* isolate_;
 
+  Object* roots_[kRootListLength];
+
   intptr_t code_range_size_;
   int reserved_semispace_size_;
   int max_semispace_size_;
@@ -1651,7 +1650,7 @@ class Heap {
   int gc_post_processing_depth_;
 
   // Returns the amount of external memory registered since last global gc.
-  int PromotedExternalMemorySize();
+  intptr_t PromotedExternalMemorySize();
 
   int ms_count_;  // how many mark-sweep collections happened
   unsigned int gc_count_;  // how many gc happened
@@ -1716,17 +1715,15 @@ class Heap {
 
   // The amount of external memory registered through the API kept alive
   // by global handles
-  int amount_of_external_allocated_memory_;
+  intptr_t amount_of_external_allocated_memory_;
 
   // Caches the amount of external memory registered at the last global gc.
-  int amount_of_external_allocated_memory_at_last_global_gc_;
+  intptr_t amount_of_external_allocated_memory_at_last_global_gc_;
 
   // Indicates that an allocation has failed in the old generation since the
   // last GC.
   int old_gen_exhausted_;
 
-  Object* roots_[kRootListLength];
-
   Object* global_contexts_list_;
 
   StoreBufferRebuilder store_buffer_rebuilder_;
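
Together with the widened AdjustAmountOfExternalAllocatedMemory above, the two external-memory counters move from int to intptr_t. A standalone sketch of the overflow the wider type avoids, assuming an LP64 target where int stays 32 bits:

    #include <stdint.h>
    void Sketch() {
      int32_t narrow = 0;
      intptr_t wide = 0;
      for (int i = 0; i < 3; ++i) {
        narrow += 1 << 30;                       // overflows past 2 GB (UB)
        wide += static_cast<intptr_t>(1) << 30;  // exact on a 64-bit build
      }
      // narrow has wrapped; wide == 3 GB, so embedders registering large
      // external buffers keep an accurate running total.
    }
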
@@ -1850,7 +1847,7 @@ class Heap {
   // Performs a minor collection in new generation.
   void Scavenge();
 
-  static HeapObject* UpdateNewSpaceReferenceInExternalStringTableEntry(
+  static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
       Heap* heap,
       Object** pointer);
 
@@ -1979,13 +1976,6 @@ class Heap {
     return (scavenges_since_last_idle_round_ >= kIdleScavengeThreshold);
   }
 
-  bool WorthStartingGCWhenIdle() {
-    if (contexts_disposed_ > 0) {
-      return true;
-    }
-    return incremental_marking()->WorthActivating();
-  }
-
   // Estimates how many milliseconds a Mark-Sweep would take to complete.
   // In idle notification handler we assume that this function will return:
   // - a number less than 10 for small heaps, which are less than 8Mb.
diff --git a/src/3rdparty/v8/src/hydrogen-instructions.cc b/src/3rdparty/v8/src/hydrogen-instructions.cc
index 287fe38..c66a7a1 100644
@@ -416,6 +416,7 @@ void HValue::Kill() {
   SetFlag(kIsDead);
   for (int i = 0; i < OperandCount(); ++i) {
     HValue* operand = OperandAt(i);
+    if (operand == NULL) continue;
     HUseListNode* first = operand->use_list_;
     if (first != NULL && first->value() == this && first->index() == i) {
       operand->use_list_ = first->tail();
@@ -462,7 +463,8 @@ void HValue::PrintChangesTo(StringStream* stream) {
       add_comma = true;                           \
       stream->Add(#type);                         \
     }
-    GVN_FLAG_LIST(PRINT_DO);
+    GVN_TRACKED_FLAG_LIST(PRINT_DO);
+    GVN_UNTRACKED_FLAG_LIST(PRINT_DO);
 #undef PRINT_DO
   }
   stream->Add("]");
@@ -599,6 +601,9 @@ void HInstruction::InsertAfter(HInstruction* previous) {
   SetBlock(block);
   previous->next_ = this;
   if (next != NULL) next->previous_ = this;
+  if (block->last() == previous) {
+    block->set_last(this);
+  }
 }
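
This pairs with the hydrogen.cc hunk further down, where HBasicBlock::AddInstruction stops assigning last_ itself: InsertAfter is now the single place that keeps the block's tail pointer coherent. A sketch of the invariant being preserved:

    // After inserting at the tail, the block's last() must advance:
    //   prev == block->last();
    //   instr->InsertAfter(prev);
    //   ASSERT(block->last() == instr && instr->next() == NULL);
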
 
 
@@ -608,6 +613,7 @@ void HInstruction::Verify() {
   HBasicBlock* cur_block = block();
   for (int i = 0; i < OperandCount(); ++i) {
     HValue* other_operand = OperandAt(i);
+    if (other_operand == NULL) continue;
     HBasicBlock* other_block = other_operand->block();
     if (cur_block == other_block) {
       if (!other_operand->IsPhi()) {
@@ -679,11 +685,6 @@ void HCallNamed::PrintDataTo(StringStream* stream) {
 }
 
 
-void HGlobalObject::PrintDataTo(StringStream* stream) {
-  stream->Add("qml_global: %s ", qml_global()?"true":"false");
-  HUnaryOperation::PrintDataTo(stream);
-}
-
 void HCallGlobal::PrintDataTo(StringStream* stream) {
   stream->Add("%o ", *name());
   HUnaryCall::PrintDataTo(stream);
@@ -871,6 +872,17 @@ HValue* HBitwise::Canonicalize() {
 }
 
 
+HValue* HBitNot::Canonicalize() {
+  // Optimize ~~x, a common pattern used for ToInt32(x).
+  if (value()->IsBitNot()) {
+    HValue* result = HBitNot::cast(value())->value();
+    ASSERT(result->representation().IsInteger32());
+    return result;
+  }
+  return this;
+}
+
+
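
~~x is a common JavaScript idiom for ToInt32(x); once the inner HBitNot has produced an int32, the outer one is a no-op. A hedged sketch of how a Canonicalize() result is typically consumed by the graph's canonicalization pass:

    HValue* replacement = instr->Canonicalize();
    if (replacement != instr) {
      // e.g. BitNot(BitNot(x)) canonicalizes straight to x; a NULL
      // replacement (see HUnaryMathOperation below) deletes the node.
      instr->DeleteAndReplaceWith(replacement);
    }
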
 HValue* HAdd::Canonicalize() {
   if (!representation().IsInteger32()) return this;
   if (CheckUsesForFlag(kTruncatingToInt32)) ClearFlag(kCanOverflow);
@@ -921,6 +933,62 @@ void HJSArrayLength::PrintDataTo(StringStream* stream) {
 }
 
 
+HValue* HUnaryMathOperation::Canonicalize() {
+  if (op() == kMathFloor) {
+    // If the input is integer32 then we replace the floor instruction
+    // with its input. This happens before the representation changes are
+    // introduced.
+    if (value()->representation().IsInteger32()) return value();
+
+#ifdef V8_TARGET_ARCH_ARM
+    if (value()->IsDiv() && (value()->UseCount() == 1)) {
+      // TODO(2038): Implement this optimization for non ARM architectures.
+      HDiv* hdiv = HDiv::cast(value());
+      HValue* left = hdiv->left();
+      HValue* right = hdiv->right();
+      // Try to simplify left and right values of the division.
+      HValue* new_left =
+        LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(left);
+      HValue* new_right =
+        LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(right);
+
+      // Return if left or right are not optimizable.
+      if ((new_left == NULL) || (new_right == NULL)) return this;
+
+      // Insert the new values in the graph.
+      if (new_left->IsInstruction() &&
+          !HInstruction::cast(new_left)->IsLinked()) {
+        HInstruction::cast(new_left)->InsertBefore(this);
+      }
+      if (new_right->IsInstruction() &&
+          !HInstruction::cast(new_right)->IsLinked()) {
+        HInstruction::cast(new_right)->InsertBefore(this);
+      }
+      HMathFloorOfDiv* instr =  new HMathFloorOfDiv(context(),
+          new_left,
+          new_right);
+      // Replace this HMathFloor instruction by the new HMathFloorOfDiv.
+      instr->InsertBefore(this);
+      ReplaceAllUsesWith(instr);
+      Kill();
+      // We know the division had no other uses than this HMathFloor. Delete it.
+      // Also delete the arguments of the division if they are not used any
+      // more.
+      hdiv->DeleteAndReplaceWith(NULL);
+      ASSERT(left->IsChange() || left->IsConstant());
+      ASSERT(right->IsChange() || right->IsConstant());
+      if (left->HasNoUses())  left->DeleteAndReplaceWith(NULL);
+      if (right->HasNoUses())  right->DeleteAndReplaceWith(NULL);
+
+      // Return NULL to remove this instruction from the graph.
+      return NULL;
+    }
+#endif  // V8_TARGET_ARCH_ARM
+  }
+  return this;
+}
+
+
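
The rewrite above fuses Math.floor(a / b) into a single HMathFloorOfDiv node (ARM only for now, per the TODO). Flooring division is not the same as C++'s truncating division for negative operands, which is why a dedicated instruction is needed; a worked sketch:

    // Math.floor( 7 / 2) ==  3 ==  7 / 2 in C++ (truncation agrees)
    // Math.floor(-7 / 2) == -4, but -7 / 2 in C++ is -3 (truncation differs)
    int32_t FloorDiv(int32_t a, int32_t b) {  // sketch: assumes b != 0
      int32_t q = a / b;
      if ((a % b) != 0 && ((a < 0) != (b < 0))) q--;  // round toward -infinity
      return q;
    }
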
 HValue* HCheckInstanceType::Canonicalize() {
   if (check_ == IS_STRING &&
       !value()->type().IsUninitialized() &&
@@ -1535,6 +1603,7 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
   SetOperandAt(1, object);
   set_representation(Representation::Tagged());
   SetGVNFlag(kDependsOnMaps);
+  int map_transitions = 0;
   for (int i = 0;
        i < types->length() && types_.length() < kMaxLoadPolymorphism;
        ++i) {
@@ -1556,13 +1625,20 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
         case CONSTANT_FUNCTION:
           types_.Add(types->at(i));
           break;
+        case MAP_TRANSITION:
+          // We should just ignore these since they are not relevant to a load
+          // operation.  This means we will deopt if we actually see this map
+          // from optimized code.
+          map_transitions++;
+          break;
         default:
           break;
       }
     }
   }
 
-  if (types_.length() == types->length() && FLAG_deoptimize_uncommon_cases) {
+  if (types_.length() + map_transitions == types->length() &&
+      FLAG_deoptimize_uncommon_cases) {
     SetFlag(kUseGVN);
   } else {
     SetAllSideEffects();
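
Counting the ignored MAP_TRANSITION entries keeps the polymorphic load GVN-eligible: the test above asks whether every map seen by the type feedback was either handled or deliberately skipped. A small worked case, with hypothetical maps m1 and m2:

    // types = {m1 -> FIELD, m2 -> MAP_TRANSITION}
    // types_.length() == 1, map_transitions == 1, types->length() == 2
    // 1 + 1 == 2  =>  SetFlag(kUseGVN); optimized code deopts if m2 is seen.
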
@@ -1738,6 +1814,9 @@ void HStoreNamedField::PrintDataTo(StringStream* stream) {
   stream->Add(" = ");
   value()->PrintNameTo(stream);
   stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
+  if (NeedsWriteBarrier()) {
+    stream->Add(" (write-barrier)");
+  }
   if (!transition().is_null()) {
     stream->Add(" (transition map %p)", *transition());
   }
@@ -2091,6 +2170,17 @@ HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
 }
 
 
+bool HStoreKeyedFastDoubleElement::NeedsCanonicalization() {
+  // If value was loaded from unboxed double backing store or
+  // converted from an integer then we don't have to canonicalize it.
+  if (value()->IsLoadKeyedFastDoubleElement() ||
+      (value()->IsChange() && HChange::cast(value())->from().IsInteger32())) {
+    return false;
+  }
+  return true;
+}
+
+
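
The canonicalization requirement exists because unboxed double backing stores reserve one specific NaN bit pattern as the "hole" marker, so an arbitrary NaN produced by arithmetic could be mistaken for a hole. Values loaded from another double array, or converted from int32, are canonical by construction. A hedged sketch of the store-side fix-up:

    #include <limits>
    double CanonicalizeForDoubleArray(double v) {  // illustration only
      // Any NaN (v != v) is rewritten to the canonical quiet NaN, so it
      // can never collide with the reserved hole pattern.
      return (v != v) ? std::numeric_limits<double>::quiet_NaN() : v;
    }
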
 #define H_CONSTANT_INT32(val)                                                  \
 new(zone) HConstant(FACTORY->NewNumberFromInt(val, TENURED),                   \
                     Representation::Integer32())
@@ -2259,6 +2349,13 @@ void HIn::PrintDataTo(StringStream* stream) {
 }
 
 
+void HBitwise::PrintDataTo(StringStream* stream) {
+  stream->Add(Token::Name(op_));
+  stream->Add(" ");
+  HBitwiseBinaryOperation::PrintDataTo(stream);
+}
+
+
 Representation HPhi::InferredRepresentation() {
   bool double_occurred = false;
   bool int32_occurred = false;
diff --git a/src/3rdparty/v8/src/hydrogen-instructions.h b/src/3rdparty/v8/src/hydrogen-instructions.h
index 3854330..4f21db7 100644
@@ -140,6 +140,7 @@ class LChunkBuilder;
   V(LoadNamedField)                            \
   V(LoadNamedFieldPolymorphic)                 \
   V(LoadNamedGeneric)                          \
+  V(MathFloorOfDiv)                            \
   V(Mod)                                       \
   V(Mul)                                       \
   V(ObjectLiteral)                             \
@@ -188,7 +189,10 @@ class LChunkBuilder;
   V(DateField)                                 \
   V(WrapReceiver)
 
-#define GVN_FLAG_LIST(V)                       \
+#define GVN_TRACKED_FLAG_LIST(V)               \
+  V(NewSpacePromotion)
+
+#define GVN_UNTRACKED_FLAG_LIST(V)             \
   V(Calls)                                     \
   V(InobjectFields)                            \
   V(BackingStoreFields)                        \
@@ -506,14 +510,18 @@ class HUseIterator BASE_EMBEDDED {
 
 // There must be one corresponding kDepends flag for every kChanges flag and
 // the order of the kChanges flags must be exactly the same as of the kDepends
-// flags.
+// flags. All tracked flags should appear before untracked ones.
 enum GVNFlag {
   // Declare global value numbering flags.
 #define DECLARE_FLAG(type) kChanges##type, kDependsOn##type,
-  GVN_FLAG_LIST(DECLARE_FLAG)
+  GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
+  GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
 #undef DECLARE_FLAG
   kAfterLastFlag,
-  kLastFlag = kAfterLastFlag - 1
+  kLastFlag = kAfterLastFlag - 1,
+#define COUNT_FLAG(type) + 1
+  kNumberOfTrackedSideEffects = 0 GVN_TRACKED_FLAG_LIST(COUNT_FLAG)
+#undef COUNT_FLAG
 };
 
 typedef EnumSet<GVNFlag> GVNFlagSet;
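
Because DECLARE_FLAG expands kChanges##type and kDependsOn##type as a pair, and the tracked list expands first, tracked side effect i always lands at enum values 2*i and 2*i+1. That is the layout the helpers in the next hunk rely on; sketched expansion:

    // kChangesNewSpacePromotion   == 0   (tracked side effect #0)
    // kDependsOnNewSpacePromotion == 1
    // kChangesCalls               == 2   (first untracked flag)
    // kDependsOnCalls             == 3
    // ...
    // ChangesFlagFromInt(i)   == 2 * i
    // DependsOnFlagFromInt(i) == 2 * i + 1
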
@@ -530,6 +538,10 @@ class HValue: public ZoneObject {
     // implement DataEquals(), which will be used to determine if other
     // occurrences of the instruction are indeed the same.
     kUseGVN,
+    // Track instructions that are dominating side effects. If an instruction
+    // sets this flag, it must implement SetSideEffectDominator() and should
+    // indicate which side effects to track by setting GVN flags.
+    kTrackSideEffectDominators,
     kCanOverflow,
     kBailoutOnMinusZero,
     kCanBeDivByZero,
@@ -544,6 +556,12 @@ class HValue: public ZoneObject {
 
   static const int kChangesToDependsFlagsLeftShift = 1;
 
+  static GVNFlag ChangesFlagFromInt(int x) {
+    return static_cast<GVNFlag>(x * 2);
+  }
+  static GVNFlag DependsOnFlagFromInt(int x) {
+    return static_cast<GVNFlag>(x * 2 + 1);
+  }
   static GVNFlagSet ConvertChangesToDependsFlags(GVNFlagSet flags) {
     return GVNFlagSet(flags.ToIntegral() << kChangesToDependsFlagsLeftShift);
   }
@@ -726,6 +744,13 @@ class HValue: public ZoneObject {
 
   virtual HType CalculateInferredType();
 
+  // This function must be overridden for instructions which have the
+  // kTrackSideEffectDominators flag set, to track instructions that are
+  // dominating side effects.
+  virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
+    UNREACHABLE();
+  }
+
 #ifdef DEBUG
   virtual void Verify() = 0;
 #endif
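
The default implementation hits UNREACHABLE() on purpose: only instructions that set kTrackSideEffectDominators are ever handed a dominator, and each of them must override the hook. In this patch HStoreNamedField is the sole client; a condensed sketch of the contract:

    // 1. Opt in and declare what to track (constructor):
    //      SetFlag(kTrackSideEffectDominators);
    //      SetGVNFlag(kDependsOnNewSpacePromotion);
    // 2. GVN calls back with the nearest dominating effect (AnalyzeBlock):
    //      instr->SetSideEffectDominator(kChangesNewSpacePromotion, other);
    // 3. The override records it for later use (HStoreNamedField below):
    //      new_space_dominator_ = dominator;
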
@@ -756,7 +781,8 @@ class HValue: public ZoneObject {
     GVNFlagSet result;
     // Create changes mask.
 #define ADD_FLAG(type) result.Add(kDependsOn##type);
-  GVN_FLAG_LIST(ADD_FLAG)
+  GVN_TRACKED_FLAG_LIST(ADD_FLAG)
+  GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
 #undef ADD_FLAG
     return result;
   }
@@ -765,7 +791,8 @@ class HValue: public ZoneObject {
     GVNFlagSet result;
     // Create changes mask.
 #define ADD_FLAG(type) result.Add(kChanges##type);
-  GVN_FLAG_LIST(ADD_FLAG)
+  GVN_TRACKED_FLAG_LIST(ADD_FLAG)
+  GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
 #undef ADD_FLAG
     return result;
   }
@@ -781,6 +808,7 @@ class HValue: public ZoneObject {
   // an executing program (i.e. are not safe to repeat, move or remove);
   static GVNFlagSet AllObservableSideEffectsFlagSet() {
     GVNFlagSet result = AllChangesFlagSet();
+    result.Remove(kChangesNewSpacePromotion);
     result.Remove(kChangesElementsKind);
     result.Remove(kChangesElementsPointer);
     result.Remove(kChangesMaps);
@@ -1196,6 +1224,7 @@ class HChange: public HUnaryOperation {
     SetFlag(kUseGVN);
     if (deoptimize_on_undefined) SetFlag(kDeoptimizeOnUndefined);
     if (is_truncating) SetFlag(kTruncatingToInt32);
+    if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion);
   }
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
@@ -1321,6 +1350,7 @@ class HStackCheck: public HTemplateInstruction<1> {
 
   HStackCheck(HValue* context, Type type) : type_(type) {
     SetOperandAt(0, context);
+    SetGVNFlag(kChangesNewSpacePromotion);
   }
 
   HValue* context() { return OperandAt(0); }
@@ -1354,13 +1384,15 @@ class HEnterInlined: public HTemplateInstruction<0> {
                 FunctionLiteral* function,
                 CallKind call_kind,
                 bool is_construct,
-                Variable* arguments)
+                Variable* arguments_var,
+                ZoneList<HValue*>* arguments_values)
       : closure_(closure),
         arguments_count_(arguments_count),
         function_(function),
         call_kind_(call_kind),
         is_construct_(is_construct),
-        arguments_(arguments) {
+        arguments_var_(arguments_var),
+        arguments_values_(arguments_values) {
   }
 
   virtual void PrintDataTo(StringStream* stream);
@@ -1375,7 +1407,8 @@ class HEnterInlined: public HTemplateInstruction<0> {
     return Representation::None();
   }
 
-  Variable* arguments() { return arguments_; }
+  Variable* arguments_var() { return arguments_var_; }
+  ZoneList<HValue*>* arguments_values() { return arguments_values_; }
 
   DECLARE_CONCRETE_INSTRUCTION(EnterInlined)
 
@@ -1385,19 +1418,28 @@ class HEnterInlined: public HTemplateInstruction<0> {
   FunctionLiteral* function_;
   CallKind call_kind_;
   bool is_construct_;
-  Variable* arguments_;
+  Variable* arguments_var_;
+  ZoneList<HValue*>* arguments_values_;
 };
 
 
 class HLeaveInlined: public HTemplateInstruction<0> {
  public:
-  HLeaveInlined() {}
+  explicit HLeaveInlined(bool arguments_pushed)
+      : arguments_pushed_(arguments_pushed) { }
 
   virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
+  bool arguments_pushed() {
+    return arguments_pushed_;
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(LeaveInlined)
+
+ private:
+  bool arguments_pushed_;
 };
 
 
@@ -1508,30 +1550,19 @@ class HDeclareGlobals: public HUnaryOperation {
 
 class HGlobalObject: public HUnaryOperation {
  public:
-  explicit HGlobalObject(HValue* context) : HUnaryOperation(context), qml_global_(false) {
+  explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
   }
 
-  virtual void PrintDataTo(StringStream* stream);
-
   DECLARE_CONCRETE_INSTRUCTION(GlobalObject)
 
   virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
-  bool qml_global() { return qml_global_; }
-  void set_qml_global(bool v) { qml_global_ = v; }
-
  protected:
-  virtual bool DataEquals(HValue* other) {
-      HGlobalObject* o = HGlobalObject::cast(other);
-      return o->qml_global_ == qml_global_;
-  }
-
- private:
-  bool qml_global_;
+  virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
@@ -1616,14 +1647,26 @@ class HInvokeFunction: public HBinaryCall {
       : HBinaryCall(context, function, argument_count) {
   }
 
+  HInvokeFunction(HValue* context,
+                  HValue* function,
+                  Handle<JSFunction> known_function,
+                  int argument_count)
+      : HBinaryCall(context, function, argument_count),
+        known_function_(known_function) {
+  }
+
   virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
   HValue* context() { return first(); }
   HValue* function() { return second(); }
+  Handle<JSFunction> known_function() { return known_function_; }
 
   DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
+
+ private:
+  Handle<JSFunction> known_function_;
 };
 
 
@@ -1711,7 +1754,7 @@ class HCallFunction: public HBinaryCall {
 class HCallGlobal: public HUnaryCall {
  public:
   HCallGlobal(HValue* context, Handle<String> name, int argument_count)
-      : HUnaryCall(context, argument_count), name_(name), qml_global_(false) {
+      : HUnaryCall(context, argument_count), name_(name) {
   }
 
   virtual void PrintDataTo(StringStream* stream);
@@ -1723,14 +1766,10 @@ class HCallGlobal: public HUnaryCall {
     return Representation::Tagged();
   }
 
-  bool qml_global() { return qml_global_; }
-  void set_qml_global(bool v) { qml_global_ = v; }
-
   DECLARE_CONCRETE_INSTRUCTION(CallGlobal)
 
  private:
   Handle<String> name_;
-  bool qml_global_;
 };
 
 
@@ -1880,6 +1919,8 @@ class HBitNot: public HUnaryOperation {
   }
   virtual HType CalculateInferredType();
 
+  virtual HValue* Canonicalize();
+
   DECLARE_CONCRETE_INSTRUCTION(BitNot)
 
  protected:
@@ -1902,6 +1943,7 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
       case kMathAbs:
         set_representation(Representation::Tagged());
         SetFlag(kFlexibleRepresentation);
+        SetGVNFlag(kChangesNewSpacePromotion);
         break;
       case kMathSqrt:
       case kMathPowHalf:
@@ -1910,6 +1952,7 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
       case kMathCos:
       case kMathTan:
         set_representation(Representation::Double());
+        SetGVNFlag(kChangesNewSpacePromotion);
         break;
       default:
         UNREACHABLE();
@@ -1950,15 +1993,7 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
     }
   }
 
-  virtual HValue* Canonicalize() {
-    // If the input is integer32 then we replace the floor instruction
-    // with its inputs.  This happens before the representation changes are
-    // introduced.
-    if (op() == kMathFloor) {
-      if (value()->representation().IsInteger32()) return value();
-    }
-    return this;
-  }
+  virtual HValue* Canonicalize();
 
   BuiltinFunctionId op() const { return op_; }
   const char* OpName() const;
@@ -2592,7 +2627,7 @@ class HApplyArguments: public HTemplateInstruction<4> {
 
 class HArgumentsElements: public HTemplateInstruction<0> {
  public:
-  HArgumentsElements() {
+  explicit HArgumentsElements(bool from_inlined) : from_inlined_(from_inlined) {
     // The value produced by this instruction is a pointer into the stack
     // that looks as if it was a smi because of alignment.
     set_representation(Representation::Tagged());
@@ -2605,8 +2640,12 @@ class HArgumentsElements: public HTemplateInstruction<0> {
     return Representation::None();
   }
 
+  bool from_inlined() const { return from_inlined_; }
+
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
+
+  bool from_inlined_;
 };
 
 
@@ -2712,6 +2751,25 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
 };
 
 
+class HMathFloorOfDiv: public HBinaryOperation {
+ public:
+  HMathFloorOfDiv(HValue* context, HValue* left, HValue* right)
+      : HBinaryOperation(context, left, right) {
+    set_representation(Representation::Integer32());
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Integer32();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv)
+
+ protected:
+  virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
 class HArithmeticBinaryOperation: public HBinaryOperation {
  public:
   HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right)
@@ -3126,6 +3184,7 @@ class HPower: public HTemplateInstruction<2> {
     SetOperandAt(1, right);
     set_representation(Representation::Double());
     SetFlag(kUseGVN);
+    SetGVNFlag(kChangesNewSpacePromotion);
   }
 
   HValue* left() { return OperandAt(0); }
@@ -3325,6 +3384,8 @@ class HBitwise: public HBitwiseBinaryOperation {
                                    HValue* left,
                                    HValue* right);
 
+  virtual void PrintDataTo(StringStream* stream);
+
   DECLARE_CONCRETE_INSTRUCTION(Bitwise)
 
  protected:
@@ -3572,6 +3633,12 @@ inline bool StoringValueNeedsWriteBarrier(HValue* value) {
 }
 
 
+inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
+                                            HValue* new_space_dominator) {
+  return !object->IsAllocateObject() || (object != new_space_dominator);
+}
+
+
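
This predicate is the payoff of the new dominator tracking: a store into an object that is itself the most recent new-space allocation cannot create an old-to-new pointer, so the write barrier can be skipped. A hedged sketch of the case it targets:

    // JavaScript:   var o = new C();  o.x = v;
    // Hydrogen IR (sketch):
    //   t0 = AllocateObject(C)          // changes NewSpacePromotion
    //   StoreNamedField(t0, "x", v)     // new_space_dominator() == t0
    // object == new_space_dominator, so NeedsWriteBarrier() is false.
    // Any intervening NewSpacePromotion change (a call, another allocation)
    // replaces the dominator and the barrier comes back.
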
 class HStoreGlobalCell: public HUnaryOperation {
  public:
   HStoreGlobalCell(HValue* value,
@@ -3879,15 +3946,27 @@ class HLoadFunctionPrototype: public HUnaryOperation {
   virtual bool DataEquals(HValue* other) { return true; }
 };
 
+class ArrayInstructionInterface {
+ public:
+  virtual HValue* GetKey() = 0;
+  virtual void SetKey(HValue* key) = 0;
+  virtual void SetIndexOffset(uint32_t index_offset) = 0;
+  virtual bool IsDehoisted() = 0;
+  virtual void SetDehoisted(bool is_dehoisted) = 0;
+  virtual ~ArrayInstructionInterface() { };
+};
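
This interface serves the new DehoistSimpleArrayIndexComputations pass (invoked from CreateGraph below): keyed loads and stores whose key is "index + constant" can absorb the constant into index_offset and address memory directly. Sketch of the rewrite, for an access like a[i + 4]:

    //   k = Add(i, 4);  Load/StoreKeyed...(elements, k, ...)
    // becomes, via the interface:
    //   instr->SetKey(i);
    //   instr->SetIndexOffset(4);
    //   instr->SetDehoisted(true);
    // letting the backend fold the +4 into the addressing mode.
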
 
-class HLoadKeyedFastElement: public HTemplateInstruction<2> {
+class HLoadKeyedFastElement
+    : public HTemplateInstruction<2>, public ArrayInstructionInterface {
  public:
   enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
 
   HLoadKeyedFastElement(HValue* obj,
                         HValue* key,
                         HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
-      : hole_check_mode_(hole_check_mode) {
+      : hole_check_mode_(hole_check_mode),
+        index_offset_(0),
+        is_dehoisted_(false) {
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
     set_representation(Representation::Tagged());
@@ -3897,6 +3976,12 @@ class HLoadKeyedFastElement: public HTemplateInstruction<2> {
 
   HValue* object() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
+  uint32_t index_offset() { return index_offset_; }
+  void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
+  HValue* GetKey() { return key(); }
+  void SetKey(HValue* key) { SetOperandAt(1, key); }
+  bool IsDehoisted() { return is_dehoisted_; }
+  void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
 
   virtual Representation RequiredInputRepresentation(int index) {
     // The key is supposed to be Integer32.
@@ -3915,17 +4000,23 @@ class HLoadKeyedFastElement: public HTemplateInstruction<2> {
   virtual bool DataEquals(HValue* other) {
     if (!other->IsLoadKeyedFastElement()) return false;
     HLoadKeyedFastElement* other_load = HLoadKeyedFastElement::cast(other);
+    if (is_dehoisted_ && index_offset_ != other_load->index_offset_)
+      return false;
     return hole_check_mode_ == other_load->hole_check_mode_;
   }
 
  private:
   HoleCheckMode hole_check_mode_;
+  uint32_t index_offset_;
+  bool is_dehoisted_;
 };
 
 
-class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
+class HLoadKeyedFastDoubleElement
+    : public HTemplateInstruction<2>, public ArrayInstructionInterface {
  public:
-  HLoadKeyedFastDoubleElement(HValue* elements, HValue* key) {
+  HLoadKeyedFastDoubleElement(HValue* elements, HValue* key)
+      : index_offset_(0), is_dehoisted_(false) {
     SetOperandAt(0, elements);
     SetOperandAt(1, key);
     set_representation(Representation::Double());
@@ -3935,6 +4026,12 @@ class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
 
   HValue* elements() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
+  uint32_t index_offset() { return index_offset_; }
+  void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
+  HValue* GetKey() { return key(); }
+  void SetKey(HValue* key) { SetOperandAt(1, key); }
+  bool IsDehoisted() { return is_dehoisted_; }
+  void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
 
   virtual Representation RequiredInputRepresentation(int index) {
     // The key is supposed to be Integer32.
@@ -3949,15 +4046,22 @@ class HLoadKeyedFastDoubleElement: public HTemplateInstruction<2> {
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+  uint32_t index_offset_;
+  bool is_dehoisted_;
 };
 
 
-class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
+class HLoadKeyedSpecializedArrayElement
+    : public HTemplateInstruction<2>, public ArrayInstructionInterface {
  public:
   HLoadKeyedSpecializedArrayElement(HValue* external_elements,
                                     HValue* key,
                                     ElementsKind elements_kind)
-      :  elements_kind_(elements_kind) {
+      :  elements_kind_(elements_kind),
+         index_offset_(0),
+         is_dehoisted_(false) {
     SetOperandAt(0, external_elements);
     SetOperandAt(1, key);
     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
@@ -3985,6 +4089,12 @@ class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
   HValue* external_pointer() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
   ElementsKind elements_kind() const { return elements_kind_; }
+  uint32_t index_offset() { return index_offset_; }
+  void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
+  HValue* GetKey() { return key(); }
+  void SetKey(HValue* key) { SetOperandAt(1, key); }
+  bool IsDehoisted() { return is_dehoisted_; }
+  void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
 
   virtual Range* InferRange(Zone* zone);
 
@@ -4000,6 +4110,8 @@ class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
 
  private:
   ElementsKind elements_kind_;
+  uint32_t index_offset_;
+  bool is_dehoisted_;
 };
 
 
@@ -4038,9 +4150,12 @@ class HStoreNamedField: public HTemplateInstruction<2> {
                    int offset)
       : name_(name),
         is_in_object_(in_object),
-        offset_(offset) {
+        offset_(offset),
+        new_space_dominator_(NULL) {
     SetOperandAt(0, obj);
     SetOperandAt(1, val);
+    SetFlag(kTrackSideEffectDominators);
+    SetGVNFlag(kDependsOnNewSpacePromotion);
     if (is_in_object_) {
       SetGVNFlag(kChangesInobjectFields);
     } else {
@@ -4053,6 +4168,10 @@ class HStoreNamedField: public HTemplateInstruction<2> {
   virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
+  virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
+    ASSERT(side_effect == kChangesNewSpacePromotion);
+    new_space_dominator_ = dominator;
+  }
   virtual void PrintDataTo(StringStream* stream);
 
   HValue* object() { return OperandAt(0); }
@@ -4063,9 +4182,11 @@ class HStoreNamedField: public HTemplateInstruction<2> {
   int offset() const { return offset_; }
   Handle<Map> transition() const { return transition_; }
   void set_transition(Handle<Map> map) { transition_ = map; }
+  HValue* new_space_dominator() const { return new_space_dominator_; }
 
   bool NeedsWriteBarrier() {
-    return StoringValueNeedsWriteBarrier(value());
+    return StoringValueNeedsWriteBarrier(value()) &&
+        ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
   }
 
  private:
@@ -4073,6 +4194,7 @@ class HStoreNamedField: public HTemplateInstruction<2> {
   bool is_in_object_;
   int offset_;
   Handle<Map> transition_;
+  HValue* new_space_dominator_;
 };
 
 
@@ -4111,11 +4233,12 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
 };
 
 
-class HStoreKeyedFastElement: public HTemplateInstruction<3> {
+class HStoreKeyedFastElement
+    : public HTemplateInstruction<3>, public ArrayInstructionInterface {
  public:
   HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val,
                          ElementsKind elements_kind = FAST_ELEMENTS)
-      : elements_kind_(elements_kind) {
+      : elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
     SetOperandAt(2, val);
@@ -4135,6 +4258,12 @@ class HStoreKeyedFastElement: public HTemplateInstruction<3> {
   bool value_is_smi() {
     return elements_kind_ == FAST_SMI_ONLY_ELEMENTS;
   }
+  uint32_t index_offset() { return index_offset_; }
+  void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
+  HValue* GetKey() { return key(); }
+  void SetKey(HValue* key) { SetOperandAt(1, key); }
+  bool IsDehoisted() { return is_dehoisted_; }
+  void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
 
   bool NeedsWriteBarrier() {
     if (value_is_smi()) {
@@ -4150,14 +4279,18 @@ class HStoreKeyedFastElement: public HTemplateInstruction<3> {
 
  private:
   ElementsKind elements_kind_;
+  uint32_t index_offset_;
+  bool is_dehoisted_;
 };
 
 
-class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> {
+class HStoreKeyedFastDoubleElement
+    : public HTemplateInstruction<3>, public ArrayInstructionInterface {
  public:
   HStoreKeyedFastDoubleElement(HValue* elements,
                                HValue* key,
-                               HValue* val) {
+                               HValue* val)
+      : index_offset_(0), is_dehoisted_(false) {
     SetOperandAt(0, elements);
     SetOperandAt(1, key);
     SetOperandAt(2, val);
@@ -4177,24 +4310,37 @@ class HStoreKeyedFastDoubleElement: public HTemplateInstruction<3> {
   HValue* elements() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
   HValue* value() { return OperandAt(2); }
+  uint32_t index_offset() { return index_offset_; }
+  void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
+  HValue* GetKey() { return key(); }
+  void SetKey(HValue* key) { SetOperandAt(1, key); }
+  bool IsDehoisted() { return is_dehoisted_; }
+  void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
 
   bool NeedsWriteBarrier() {
     return StoringValueNeedsWriteBarrier(value());
   }
 
+  bool NeedsCanonicalization();
+
   virtual void PrintDataTo(StringStream* stream);
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement)
+
+ private:
+  uint32_t index_offset_;
+  bool is_dehoisted_;
 };
 
 
-class HStoreKeyedSpecializedArrayElement: public HTemplateInstruction<3> {
+class HStoreKeyedSpecializedArrayElement
+    : public HTemplateInstruction<3>, public ArrayInstructionInterface {
  public:
   HStoreKeyedSpecializedArrayElement(HValue* external_elements,
                                      HValue* key,
                                      HValue* val,
                                      ElementsKind elements_kind)
-      : elements_kind_(elements_kind) {
+      : elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
     SetGVNFlag(kChangesSpecializedArrayElements);
     SetOperandAt(0, external_elements);
     SetOperandAt(1, key);
@@ -4222,11 +4368,19 @@ class HStoreKeyedSpecializedArrayElement: public HTemplateInstruction<3> {
   HValue* key() { return OperandAt(1); }
   HValue* value() { return OperandAt(2); }
   ElementsKind elements_kind() const { return elements_kind_; }
+  uint32_t index_offset() { return index_offset_; }
+  void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
+  HValue* GetKey() { return key(); }
+  void SetKey(HValue* key) { SetOperandAt(1, key); }
+  bool IsDehoisted() { return is_dehoisted_; }
+  void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement)
 
  private:
   ElementsKind elements_kind_;
+  uint32_t index_offset_;
+  bool is_dehoisted_;
 };
 
 
@@ -4275,6 +4429,7 @@ class HTransitionElementsKind: public HTemplateInstruction<1> {
     SetFlag(kUseGVN);
     SetGVNFlag(kChangesElementsKind);
     SetGVNFlag(kChangesElementsPointer);
+    SetGVNFlag(kChangesNewSpacePromotion);
     set_representation(Representation::Tagged());
   }
 
@@ -4336,6 +4491,7 @@ class HStringCharCodeAt: public HTemplateInstruction<3> {
     set_representation(Representation::Integer32());
     SetFlag(kUseGVN);
     SetGVNFlag(kDependsOnMaps);
+    SetGVNFlag(kChangesNewSpacePromotion);
   }
 
   virtual Representation RequiredInputRepresentation(int index) {
@@ -4367,6 +4523,7 @@ class HStringCharFromCode: public HTemplateInstruction<2> {
     SetOperandAt(1, char_code);
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
+    SetGVNFlag(kChangesNewSpacePromotion);
   }
 
   virtual Representation RequiredInputRepresentation(int index) {
@@ -4419,6 +4576,7 @@ class HAllocateObject: public HTemplateInstruction<1> {
       : constructor_(constructor) {
     SetOperandAt(0, context);
     set_representation(Representation::Tagged());
+    SetGVNFlag(kChangesNewSpacePromotion);
   }
 
   // Maximum instance size for which allocations will be inlined.
@@ -4467,6 +4625,7 @@ class HFastLiteral: public HMaterializedLiteral<1> {
         boilerplate_(boilerplate),
         total_size_(total_size) {
     SetOperandAt(0, context);
+    SetGVNFlag(kChangesNewSpacePromotion);
   }
 
   // Maximum depth and total number of elements and properties for literal
@@ -4502,6 +4661,7 @@ class HArrayLiteral: public HMaterializedLiteral<1> {
         length_(length),
         boilerplate_object_(boilerplate_object) {
     SetOperandAt(0, context);
+    SetGVNFlag(kChangesNewSpacePromotion);
   }
 
   HValue* context() { return OperandAt(0); }
@@ -4542,6 +4702,7 @@ class HObjectLiteral: public HMaterializedLiteral<1> {
         fast_elements_(fast_elements),
         has_function_(has_function) {
     SetOperandAt(0, context);
+    SetGVNFlag(kChangesNewSpacePromotion);
   }
 
   HValue* context() { return OperandAt(0); }
@@ -4603,6 +4764,7 @@ class HFunctionLiteral: public HTemplateInstruction<1> {
       : shared_info_(shared), pretenure_(pretenure) {
     SetOperandAt(0, context);
     set_representation(Representation::Tagged());
+    SetGVNFlag(kChangesNewSpacePromotion);
   }
 
   HValue* context() { return OperandAt(0); }
diff --git a/src/3rdparty/v8/src/hydrogen.cc b/src/3rdparty/v8/src/hydrogen.cc
index ac7944d..99beda0 100644
@@ -113,7 +113,6 @@ void HBasicBlock::AddInstruction(HInstruction* instr) {
     first_ = last_ = entry;
   }
   instr->InsertAfter(last_);
-  last_ = instr;
 }
 
 
@@ -165,11 +164,15 @@ void HBasicBlock::Finish(HControlInstruction* end) {
 }
 
 
-void HBasicBlock::Goto(HBasicBlock* block, bool drop_extra) {
+void HBasicBlock::Goto(HBasicBlock* block, FunctionState* state) {
+  bool drop_extra = state != NULL && state->drop_extra();
+  bool arguments_pushed = state != NULL && state->arguments_pushed();
+
   if (block->IsInlineReturnTarget()) {
-    AddInstruction(new(zone()) HLeaveInlined);
+    AddInstruction(new(zone()) HLeaveInlined(arguments_pushed));
     last_environment_ = last_environment()->DiscardInlined(drop_extra);
   }
+
   AddSimulate(AstNode::kNoNumber);
   HGoto* instr = new(zone()) HGoto(block);
   Finish(instr);
@@ -178,10 +181,13 @@ void HBasicBlock::Goto(HBasicBlock* block, bool drop_extra) {
 
 void HBasicBlock::AddLeaveInlined(HValue* return_value,
                                   HBasicBlock* target,
-                                  bool drop_extra) {
+                                  FunctionState* state) {
+  bool drop_extra = state != NULL && state->drop_extra();
+  bool arguments_pushed = state != NULL && state->arguments_pushed();
+
   ASSERT(target->IsInlineReturnTarget());
   ASSERT(return_value != NULL);
-  AddInstruction(new(zone()) HLeaveInlined);
+  AddInstruction(new(zone()) HLeaveInlined(arguments_pushed));
   last_environment_ = last_environment()->DiscardInlined(drop_extra);
   last_environment()->Push(return_value);
   AddSimulate(AstNode::kNoNumber);
@@ -606,6 +612,7 @@ HGraphBuilder::HGraphBuilder(CompilationInfo* info,
       graph_(NULL),
       current_block_(NULL),
       inlined_count_(0),
+      globals_(10),
       zone_(info->isolate()->zone()),
       inline_bailout_(false) {
   // This is not initialized in the initializer list because the
@@ -1143,14 +1150,39 @@ void HRangeAnalysis::AddRange(HValue* value, Range* range) {
 
 
 void TraceGVN(const char* msg, ...) {
-  if (FLAG_trace_gvn) {
-    va_list arguments;
-    va_start(arguments, msg);
-    OS::VPrint(msg, arguments);
-    va_end(arguments);
-  }
+  va_list arguments;
+  va_start(arguments, msg);
+  OS::VPrint(msg, arguments);
+  va_end(arguments);
 }
 
+// Wrap TraceGVN in macros to avoid the expense of evaluating its arguments when
+// --trace-gvn is off.
+#define TRACE_GVN_1(msg, a1)                    \
+  if (FLAG_trace_gvn) {                         \
+    TraceGVN(msg, a1);                          \
+  }
+
+#define TRACE_GVN_2(msg, a1, a2)                \
+  if (FLAG_trace_gvn) {                         \
+    TraceGVN(msg, a1, a2);                      \
+  }
+
+#define TRACE_GVN_3(msg, a1, a2, a3)            \
+  if (FLAG_trace_gvn) {                         \
+    TraceGVN(msg, a1, a2, a3);                  \
+  }
+
+#define TRACE_GVN_4(msg, a1, a2, a3, a4)        \
+  if (FLAG_trace_gvn) {                         \
+    TraceGVN(msg, a1, a2, a3, a4);              \
+  }
+
+#define TRACE_GVN_5(msg, a1, a2, a3, a4, a5)    \
+  if (FLAG_trace_gvn) {                         \
+    TraceGVN(msg, a1, a2, a3, a4, a5);          \
+  }
+
 
 HValueMap::HValueMap(Zone* zone, const HValueMap* other)
     : array_size_(other->array_size_),
@@ -1321,6 +1353,38 @@ void HValueMap::Insert(HValue* value) {
 }
 
 
+HSideEffectMap::HSideEffectMap() : count_(0) {
+  memset(data_, 0, kNumberOfTrackedSideEffects * kPointerSize);
+}
+
+
+HSideEffectMap::HSideEffectMap(HSideEffectMap* other) : count_(other->count_) {
+  memcpy(data_, other->data_, kNumberOfTrackedSideEffects * kPointerSize);
+}
+
+
+void HSideEffectMap::Kill(GVNFlagSet flags) {
+  for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
+    GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
+    if (flags.Contains(changes_flag)) {
+      if (data_[i] != NULL) count_--;
+      data_[i] = NULL;
+    }
+  }
+}
+
+
+void HSideEffectMap::Store(GVNFlagSet flags, HInstruction* instr) {
+  for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
+    GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
+    if (flags.Contains(changes_flag)) {
+      if (data_[i] == NULL) count_++;
+      data_[i] = instr;
+    }
+  }
+}
+
+
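
HSideEffectMap keeps, per tracked side effect, the most recent dominating instruction (or NULL). Store() records a new dominator for every tracked flag an instruction changes; Kill() drops entries invalidated along merged paths. A usage sketch mirroring AnalyzeBlock further down, where effects_on_other_paths stands in for the set computed by CollectSideEffectsOnPathsToDominatedBlock:

    HSideEffectMap dominators;
    dominators.Store(instr->ChangesFlags(), instr);  // instr now dominates
    dominators.Kill(effects_on_other_paths);         // invalidated at merges
    HValue* dom = dominators.at(0);                  // latest dominator or NULL
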
 class HStackCheckEliminator BASE_EMBEDDED {
  public:
   explicit HStackCheckEliminator(HGraph* graph) : graph_(graph) { }
@@ -1427,7 +1491,9 @@ class HGlobalValueNumberer BASE_EMBEDDED {
   GVNFlagSet CollectSideEffectsOnPathsToDominatedBlock(
       HBasicBlock* dominator,
       HBasicBlock* dominated);
-  void AnalyzeBlock(HBasicBlock* block, HValueMap* map);
+  void AnalyzeBlock(HBasicBlock* block,
+                    HValueMap* map,
+                    HSideEffectMap* dominators);
   void ComputeBlockSideEffects();
   void LoopInvariantCodeMotion();
   void ProcessLoopBlock(HBasicBlock* block,
@@ -1465,7 +1531,8 @@ bool HGlobalValueNumberer::Analyze() {
     LoopInvariantCodeMotion();
   }
   HValueMap* map = new(zone()) HValueMap();
-  AnalyzeBlock(graph_->entry_block(), map);
+  HSideEffectMap side_effect_dominators;
+  AnalyzeBlock(graph_->entry_block(), map, &side_effect_dominators);
   return removed_side_effects_;
 }
 
@@ -1510,14 +1577,100 @@ void HGlobalValueNumberer::ComputeBlockSideEffects() {
 }
 
 
+SmartArrayPointer<char> GetGVNFlagsString(GVNFlagSet flags) {
+  char underlying_buffer[kLastFlag * 128];
+  Vector<char> buffer(underlying_buffer, sizeof(underlying_buffer));
+#if DEBUG
+  int offset = 0;
+  const char* separator = "";
+  const char* comma = ", ";
+  buffer[0] = 0;
+  uint32_t set_depends_on = 0;
+  uint32_t set_changes = 0;
+  for (int bit = 0; bit < kLastFlag; ++bit) {
+    if ((flags.ToIntegral() & (1 << bit)) != 0) {
+      if (bit % 2 == 0) {
+        set_changes++;
+      } else {
+        set_depends_on++;
+      }
+    }
+  }
+  bool positive_changes = set_changes < (kLastFlag / 2);
+  bool positive_depends_on = set_depends_on < (kLastFlag / 2);
+  if (set_changes > 0) {
+    if (positive_changes) {
+      offset += OS::SNPrintF(buffer + offset, "changes [");
+    } else {
+      offset += OS::SNPrintF(buffer + offset, "changes all except [");
+    }
+    for (int bit = 0; bit < kLastFlag; ++bit) {
+      if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_changes) {
+        switch (static_cast<GVNFlag>(bit)) {
+#define DECLARE_FLAG(type)                                       \
+          case kChanges##type:                                   \
+            offset += OS::SNPrintF(buffer + offset, separator);  \
+            offset += OS::SNPrintF(buffer + offset, #type);      \
+            separator = comma;                                   \
+            break;
+GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
+GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
+#undef DECLARE_FLAG
+          default:
+              break;
+        }
+      }
+    }
+    offset += OS::SNPrintF(buffer + offset, "]");
+  }
+  if (set_depends_on > 0) {
+    separator = "";
+    if (set_changes > 0) {
+      offset += OS::SNPrintF(buffer + offset, ", ");
+    }
+    if (positive_depends_on) {
+      offset += OS::SNPrintF(buffer + offset, "depends on [");
+    } else {
+      offset += OS::SNPrintF(buffer + offset, "depends on all except [");
+    }
+    for (int bit = 0; bit < kLastFlag; ++bit) {
+      if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_depends_on) {
+        switch (static_cast<GVNFlag>(bit)) {
+#define DECLARE_FLAG(type)                                       \
+          case kDependsOn##type:                                 \
+            offset += OS::SNPrintF(buffer + offset, separator);  \
+            offset += OS::SNPrintF(buffer + offset, #type);      \
+            separator = comma;                                   \
+            break;
+GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
+GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
+#undef DECLARE_FLAG
+          default:
+            break;
+        }
+      }
+    }
+    offset += OS::SNPrintF(buffer + offset, "]");
+  }
+#else
+  OS::SNPrintF(buffer, "0x%08X", flags.ToIntegral());
+#endif
+  size_t string_len = strlen(underlying_buffer) + 1;
+  ASSERT(string_len <= sizeof(underlying_buffer));
+  char* result = new char[strlen(underlying_buffer) + 1];
+  memcpy(result, underlying_buffer, string_len);
+  return SmartArrayPointer<char>(result);
+}
+
+
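
In debug builds the helper prints symbolic flag names, inverting to an "all except" form when more than half the bits are set; release builds fall back to the raw mask. Hypothetical sample outputs:

    // debug:   "changes [InobjectFields], depends on [Maps, Calls]"
    // debug:   "changes all except [NewSpacePromotion]"
    // release: "0x0000A480"
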
 void HGlobalValueNumberer::LoopInvariantCodeMotion() {
   for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
     HBasicBlock* block = graph_->blocks()->at(i);
     if (block->IsLoopHeader()) {
       GVNFlagSet side_effects = loop_side_effects_[block->block_id()];
-      TraceGVN("Try loop invariant motion for block B%d effects=0x%x\n",
-               block->block_id(),
-               side_effects.ToIntegral());
+      TRACE_GVN_2("Try loop invariant motion for block B%d %s\n",
+                  block->block_id(),
+                  *GetGVNFlagsString(side_effects));
 
       GVNFlagSet accumulated_first_time_depends;
       GVNFlagSet accumulated_first_time_changes;
@@ -1540,20 +1693,19 @@ void HGlobalValueNumberer::ProcessLoopBlock(
     GVNFlagSet* first_time_changes) {
   HBasicBlock* pre_header = loop_header->predecessors()->at(0);
   GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
-  TraceGVN("Loop invariant motion for B%d depends_flags=0x%x\n",
-           block->block_id(),
-           depends_flags.ToIntegral());
+  TRACE_GVN_2("Loop invariant motion for B%d %s\n",
+              block->block_id(),
+              *GetGVNFlagsString(depends_flags));
   HInstruction* instr = block->first();
   while (instr != NULL) {
     HInstruction* next = instr->next();
     bool hoisted = false;
     if (instr->CheckFlag(HValue::kUseGVN)) {
-      TraceGVN("Checking instruction %d (%s) instruction GVN flags 0x%X, "
-               "loop kills 0x%X\n",
-               instr->id(),
-               instr->Mnemonic(),
-               instr->gvn_flags().ToIntegral(),
-               depends_flags.ToIntegral());
+      TRACE_GVN_4("Checking instruction %d (%s) %s. Loop %s\n",
+                  instr->id(),
+                  instr->Mnemonic(),
+                  *GetGVNFlagsString(instr->gvn_flags()),
+                  *GetGVNFlagsString(loop_kills));
       bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
       if (instr->IsTransitionElementsKind()) {
         // It's possible to hoist transitions out of a loop as long as the
@@ -1576,15 +1728,19 @@ void HGlobalValueNumberer::ProcessLoopBlock(
         if (trans->transitioned_map()->has_fast_double_elements()) {
           hoist_change_blockers.Add(kChangesArrayElements);
         }
-        TraceGVN("Checking dependencies on HTransitionElementsKind %d (%s) "
-                 "hoist depends blockers 0x%X, hoist change blockers 0x%X, "
-                 "accumulated depends 0x%X, accumulated changes 0x%X\n",
-                 instr->id(),
-                 instr->Mnemonic(),
-                 hoist_depends_blockers.ToIntegral(),
-                 hoist_change_blockers.ToIntegral(),
-                 first_time_depends->ToIntegral(),
-                 first_time_changes->ToIntegral());
+        if (FLAG_trace_gvn) {
+          GVNFlagSet hoist_blockers = hoist_depends_blockers;
+          hoist_blockers.Add(hoist_change_blockers);
+          GVNFlagSet first_time = *first_time_changes;
+          first_time.Add(*first_time_depends);
+          TRACE_GVN_4("Checking dependencies on HTransitionElementsKind "
+                      "%d (%s) hoist blockers: %s; "
+                      "first-time accumulated: %s\n",
+                      instr->id(),
+                      instr->Mnemonic(),
+                      *GetGVNFlagsString(hoist_blockers),
+                      *GetGVNFlagsString(first_time));
+        }
         // It's possible to hoist transitions from the current loop only if
         // they dominate all of the successor blocks in the same loop and there
         // are no instructions that have Changes/DependsOn that intervene
@@ -1607,7 +1763,7 @@ void HGlobalValueNumberer::ProcessLoopBlock(
         }
 
         if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
-          TraceGVN("Hoisting loop invariant instruction %d\n", instr->id());
+          TRACE_GVN_1("Hoisting loop invariant instruction %d\n", instr->id());
           // Move the instruction out of the loop.
           instr->Unlink();
           instr->InsertBefore(pre_header->end());
@@ -1619,8 +1775,18 @@ void HGlobalValueNumberer::ProcessLoopBlock(
     if (!hoisted) {
       // If an instruction is not hoisted, we have to account for its side
       // effects when hoisting later HTransitionElementsKind instructions.
+      GVNFlagSet previous_depends = *first_time_depends;
+      GVNFlagSet previous_changes = *first_time_changes;
       first_time_depends->Add(instr->DependsOnFlags());
       first_time_changes->Add(instr->ChangesFlags());
+      if (!(previous_depends == *first_time_depends)) {
+        TRACE_GVN_1("Updated first-time accumulated %s\n",
+                    *GetGVNFlagsString(*first_time_depends));
+      }
+      if (!(previous_changes == *first_time_changes)) {
+        TRACE_GVN_1("Updated first-time accumulated %s\n",
+                    *GetGVNFlagsString(*first_time_changes));
+      }
     }
     instr = next;
   }
@@ -1660,10 +1826,12 @@ GVNFlagSet HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
 }
 
 
-void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block, HValueMap* map) {
-  TraceGVN("Analyzing block B%d%s\n",
-           block->block_id(),
-           block->IsLoopHeader() ? " (loop header)" : "");
+void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block,
+                                        HValueMap* map,
+                                        HSideEffectMap* dominators) {
+  TRACE_GVN_2("Analyzing block B%d%s\n",
+              block->block_id(),
+              block->IsLoopHeader() ? " (loop header)" : "");
 
   // If this is a loop header kill everything killed by the loop.
   if (block->IsLoopHeader()) {
@@ -1677,25 +1845,45 @@ void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block, HValueMap* map) {
     GVNFlagSet flags = instr->ChangesFlags();
     if (!flags.IsEmpty()) {
       // Clear all instructions in the map that are affected by side effects.
+      // Store instruction as the dominating one for tracked side effects.
       map->Kill(flags);
-      TraceGVN("Instruction %d kills\n", instr->id());
+      dominators->Store(flags, instr);
+      TRACE_GVN_2("Instruction %d %s\n", instr->id(),
+                  *GetGVNFlagsString(flags));
     }
     if (instr->CheckFlag(HValue::kUseGVN)) {
       ASSERT(!instr->HasObservableSideEffects());
       HValue* other = map->Lookup(instr);
       if (other != NULL) {
         ASSERT(instr->Equals(other) && other->Equals(instr));
-        TraceGVN("Replacing value %d (%s) with value %d (%s)\n",
-                 instr->id(),
-                 instr->Mnemonic(),
-                 other->id(),
-                 other->Mnemonic());
+        TRACE_GVN_4("Replacing value %d (%s) with value %d (%s)\n",
+                    instr->id(),
+                    instr->Mnemonic(),
+                    other->id(),
+                    other->Mnemonic());
         if (instr->HasSideEffects()) removed_side_effects_ = true;
         instr->DeleteAndReplaceWith(other);
       } else {
         map->Add(instr);
       }
     }
+    if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
+      for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
+        HValue* other = dominators->at(i);
+        GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
+        GVNFlag depends_on_flag = HValue::DependsOnFlagFromInt(i);
+        if (instr->DependsOnFlags().Contains(depends_on_flag) &&
+            (other != NULL)) {
+          TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
+                      i,
+                      instr->id(),
+                      instr->Mnemonic(),
+                      other->id(),
+                      other->Mnemonic());
+          instr->SetSideEffectDominator(changes_flag, other);
+        }
+      }
+    }
     instr = next;
   }
 
@@ -1705,20 +1893,22 @@ void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block, HValueMap* map) {
     HBasicBlock* dominated = block->dominated_blocks()->at(i);
     // No need to copy the map for the last child in the dominator tree.
     HValueMap* successor_map = (i == length - 1) ? map : map->Copy(zone());
+    HSideEffectMap successor_dominators(dominators);
 
     // Kill everything killed on any path between this block and the
-    // dominated block.
-    // We don't have to traverse these paths if the value map is
-    // already empty.
-    // If the range of block ids (block_id, dominated_id) is empty
-    // there are no such paths.
-    if (!successor_map->IsEmpty() &&
+    // dominated block.  We don't have to traverse these paths if the
+    // value map and the dominators list is already empty.  If the range
+    // of block ids (block_id, dominated_id) is empty there are no such
+    // paths.
+    if ((!successor_map->IsEmpty() || !successor_dominators.IsEmpty()) &&
         block->block_id() + 1 < dominated->block_id()) {
       visited_on_paths_.Clear();
-      successor_map->Kill(CollectSideEffectsOnPathsToDominatedBlock(block,
-                                                                    dominated));
+      GVNFlagSet side_effects_on_all_paths =
+          CollectSideEffectsOnPathsToDominatedBlock(block, dominated);
+      successor_map->Kill(side_effects_on_all_paths);
+      successor_dominators.Kill(side_effects_on_all_paths);
     }
-    AnalyzeBlock(dominated, successor_map);
+    AnalyzeBlock(dominated, successor_map, &successor_dominators);
   }
 }
 
@@ -2178,6 +2368,8 @@ FunctionState::FunctionState(HGraphBuilder* owner,
       return_handling_(return_handling),
       function_return_(NULL),
       test_context_(NULL),
+      entry_(NULL),
+      arguments_elements_(NULL),
       outer_(owner->function_state()) {
   if (outer_ != NULL) {
     // State for an inline function.
@@ -2337,8 +2529,8 @@ void TestContext::ReturnControl(HControlInstruction* instr, int ast_id) {
   instr->SetSuccessorAt(0, empty_true);
   instr->SetSuccessorAt(1, empty_false);
   owner()->current_block()->Finish(instr);
-  empty_true->Goto(if_true(), owner()->function_state()->drop_extra());
-  empty_false->Goto(if_false(), owner()->function_state()->drop_extra());
+  empty_true->Goto(if_true(), owner()->function_state());
+  empty_false->Goto(if_false(), owner()->function_state());
   owner()->set_current_block(NULL);
 }
 
@@ -2359,8 +2551,8 @@ void TestContext::BuildBranch(HValue* value) {
   HBranch* test = new(zone()) HBranch(value, empty_true, empty_false, expected);
   builder->current_block()->Finish(test);
 
-  empty_true->Goto(if_true(), owner()->function_state()->drop_extra());
-  empty_false->Goto(if_false(), owner()->function_state()->drop_extra());
+  empty_true->Goto(if_true(), owner()->function_state());
+  empty_false->Goto(if_false(), owner()->function_state());
   builder->set_current_block(NULL);
 }
 
@@ -2454,6 +2646,10 @@ HGraph* HGraphBuilder::CreateGraph() {
       Bailout("function with illegal redeclaration");
       return NULL;
     }
+    if (scope->calls_eval()) {
+      Bailout("function calls eval");
+      return NULL;
+    }
     SetUpScope(scope);
 
     // Add an edge to the body entry.  This is warty: the graph's start
@@ -2480,7 +2676,7 @@ HGraph* HGraphBuilder::CreateGraph() {
     // Handle implicit declaration of the function name in named function
     // expressions before other declarations.
     if (scope->is_function_scope() && scope->function() != NULL) {
-      HandleDeclaration(scope->function(), CONST, NULL, NULL);
+      VisitVariableDeclaration(scope->function());
     }
     VisitDeclarations(scope->declarations());
     AddSimulate(AstNode::kDeclarationsId);
@@ -2561,27 +2757,411 @@ HGraph* HGraphBuilder::CreateGraph() {
   HStackCheckEliminator sce(graph());
   sce.Process();
 
-  // Replace the results of check instructions with the original value, if the
-  // result is used. This is safe now, since we don't do code motion after this
-  // point. It enables better register allocation since the value produced by
-  // check instructions is really a copy of the original value.
-  graph()->ReplaceCheckedValues();
+  graph()->EliminateRedundantBoundsChecks();
+  graph()->DehoistSimpleArrayIndexComputations();
 
   return graph();
 }
 
 
-void HGraph::ReplaceCheckedValues() {
-  HPhase phase("H_Replace checked values", this);
+// We try to "factor up" HBoundsCheck instructions towards the root of the
+// dominator tree.
+// For now we handle checks where the index is like "exp + int32value".
+// If in the dominator tree we check "exp + v1" and later (dominated)
+// "exp + v2", if v2 <= v1 we can safely remove the second check, and if
+// v2 > v1 we can use v2 in the 1st check and again remove the second.
+// To do so we keep a dictionary of all checks where the key if the pair
+// "exp, length".
+// The class BoundsCheckKey represents this key.
+class BoundsCheckKey : public ZoneObject {
+ public:
+  HValue* IndexBase() const { return index_base_; }
+  HValue* Length() const { return length_; }
+
+  uint32_t Hash() {
+    return static_cast<uint32_t>(index_base_->Hashcode() ^ length_->Hashcode());
+  }
+
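+  // Decomposes check->index() into "index_base + offset", where the offset
+  // comes from an int32 constant operand. For example (illustrative), an
+  // index of the form "i + 3" yields the key (i, length) with *offset == 3;
+  // any other index shape becomes the base itself with *offset == 0.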
+  static BoundsCheckKey* Create(Zone* zone,
+                                HBoundsCheck* check,
+                                int32_t* offset) {
+    HValue* index_base = NULL;
+    HConstant* constant = NULL;
+    bool is_sub = false;
+
+    if (check->index()->IsAdd()) {
+      HAdd* index = HAdd::cast(check->index());
+      if (index->left()->IsConstant()) {
+        constant = HConstant::cast(index->left());
+        index_base = index->right();
+      } else if (index->right()->IsConstant()) {
+        constant = HConstant::cast(index->right());
+        index_base = index->left();
+      }
+    } else if (check->index()->IsSub()) {
+      HSub* index = HSub::cast(check->index());
+      is_sub = true;
+      if (index->left()->IsConstant()) {
+        constant = HConstant::cast(index->left());
+        index_base = index->right();
+      } else if (index->right()->IsConstant()) {
+        constant = HConstant::cast(index->right());
+        index_base = index->left();
+      }
+    }
+
+    if (constant != NULL && constant->HasInteger32Value()) {
+      *offset = is_sub ? - constant->Integer32Value()
+                       : constant->Integer32Value();
+    } else {
+      *offset = 0;
+      index_base = check->index();
+    }
+
+    return new(zone) BoundsCheckKey(index_base, check->length());
+  }
+
+ private:
+  BoundsCheckKey(HValue* index_base, HValue* length)
+    : index_base_(index_base),
+      length_(length) { }
+
+  HValue* index_base_;
+  HValue* length_;
+};
+
+
+// Data about each HBoundsCheck that can be eliminated or moved.
+// It is the "value" in the dictionary indexed by "base-index, length"
+// (the key is BoundsCheckKey).
+// We scan the code with a dominator tree traversal.
+// While traversing the dominator tree we keep a stack (implemented as a
+// singly linked list) of "data" for each basic block that contains a
+// relevant check with the same key (the dictionary holds the head of the
+// list).
+// We also keep all the "data" created for a given basic block in a list, and
+// use it to "clean up" the dictionary when backtracking in the dominator
+// tree traversal.
+// This way each dictionary entry always points directly to the check that
+// dominates the code currently being examined.
+// We also track the current "offset" of the index expression and use it to
+// decide if any check is already "covered" (so it can be removed) or not.
+class BoundsCheckBbData: public ZoneObject {
+ public:
+  BoundsCheckKey* Key() const { return key_; }
+  int32_t LowerOffset() const { return lower_offset_; }
+  int32_t UpperOffset() const { return upper_offset_; }
+  HBasicBlock* BasicBlock() const { return basic_block_; }
+  HBoundsCheck* Check() const { return check_; }
+  BoundsCheckBbData* NextInBasicBlock() const { return next_in_bb_; }
+  BoundsCheckBbData* FatherInDominatorTree() const { return father_in_dt_; }
+
+  bool OffsetIsCovered(int32_t offset) const {
+    return offset >= LowerOffset() && offset <= UpperOffset();
+  }
+
+  // This method removes new_check and modifies the current check so that it
+  // also "covers" what new_check covered.
+  // The obvious precondition is that new_check follows Check() in the
+  // same basic block, and that new_offset is not covered (otherwise we
+  // could simply remove new_check).
+  // As a consequence LowerOffset() or UpperOffset() changes (the covered
+  // range grows).
+  //
+  // In the general case the check covering the current range should be like
+  // these two checks:
+  // 0 <= Key()->IndexBase() + LowerOffset()
+  // Key()->IndexBase() + UpperOffset() < Key()->Length()
+  //
+  // We can transform the second check like this:
+  // Key()->IndexBase() + LowerOffset() <
+  //     Key()->Length() + (LowerOffset() - UpperOffset())
+  // so we can handle both checks with a single unsigned comparison.
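+  //
+  // For instance (with illustrative numbers), LowerOffset() == -2 and
+  // UpperOffset() == 5 turn the pair of checks
+  //   0 <= base - 2  and  base + 5 < length
+  // into the single unsigned comparison
+  //   (uint32_t)(base - 2) < (uint32_t)(length - 7).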
+  //
+  // The bulk of this method changes Check()->index() and Check()->length()
+  // replacing them with new HAdd instructions to perform the transformation
+  // described above.
+  void CoverCheck(HBoundsCheck* new_check,
+                  int32_t new_offset) {
+    ASSERT(new_check->index()->representation().IsInteger32());
+
+    if (new_offset > upper_offset_) {
+      upper_offset_ = new_offset;
+    } else if (new_offset < lower_offset_) {
+      lower_offset_ = new_offset;
+    } else {
+      ASSERT(false);
+    }
+
+    BuildOffsetAdd(&added_index_,
+                   &added_index_offset_,
+                   Key()->IndexBase(),
+                   new_check->index()->representation(),
+                   lower_offset_);
+    Check()->SetOperandAt(0, added_index_);
+    BuildOffsetAdd(&added_length_,
+                   &added_length_offset_,
+                   Key()->Length(),
+                   new_check->length()->representation(),
+                   lower_offset_ - upper_offset_);
+    Check()->SetOperandAt(1, added_length_);
+
+    new_check->DeleteAndReplaceWith(NULL);
+  }
+
+  void RemoveZeroOperations() {
+    RemoveZeroAdd(&added_index_, &added_index_offset_);
+    RemoveZeroAdd(&added_length_, &added_length_offset_);
+  }
+
+  BoundsCheckBbData(BoundsCheckKey* key,
+                    int32_t lower_offset,
+                    int32_t upper_offset,
+                    HBasicBlock* bb,
+                    HBoundsCheck* check,
+                    BoundsCheckBbData* next_in_bb,
+                    BoundsCheckBbData* father_in_dt)
+  : key_(key),
+    lower_offset_(lower_offset),
+    upper_offset_(upper_offset),
+    basic_block_(bb),
+    check_(check),
+    added_index_offset_(NULL),
+    added_index_(NULL),
+    added_length_offset_(NULL),
+    added_length_(NULL),
+    next_in_bb_(next_in_bb),
+    father_in_dt_(father_in_dt) { }
+
+ private:
+  BoundsCheckKey* key_;
+  int32_t lower_offset_;
+  int32_t upper_offset_;
+  HBasicBlock* basic_block_;
+  HBoundsCheck* check_;
+  HConstant* added_index_offset_;
+  HAdd* added_index_;
+  HConstant* added_length_offset_;
+  HAdd* added_length_;
+  BoundsCheckBbData* next_in_bb_;
+  BoundsCheckBbData* father_in_dt_;
+
+  void BuildOffsetAdd(HAdd** add,
+                      HConstant** constant,
+                      HValue* original_value,
+                      Representation representation,
+                      int32_t new_offset) {
+    HConstant* new_constant = new(BasicBlock()->zone())
+        HConstant(Handle<Object>(Smi::FromInt(new_offset)),
+                  Representation::Integer32());
+    if (*add == NULL) {
+      new_constant->InsertBefore(Check());
+      *add = new(BasicBlock()->zone()) HAdd(NULL,
+                                            original_value,
+                                            new_constant);
+      (*add)->AssumeRepresentation(representation);
+      (*add)->InsertBefore(Check());
+    } else {
+      new_constant->InsertBefore(*add);
+      (*constant)->DeleteAndReplaceWith(new_constant);
+    }
+    *constant = new_constant;
+  }
+
+  void RemoveZeroAdd(HAdd** add, HConstant** constant) {
+    if (*add != NULL && (*constant)->Integer32Value() == 0) {
+      (*add)->DeleteAndReplaceWith((*add)->left());
+      (*constant)->DeleteAndReplaceWith(NULL);
+    }
+  }
+};
+
+
+static bool BoundsCheckKeyMatch(void* key1, void* key2) {
+  BoundsCheckKey* k1 = static_cast<BoundsCheckKey*>(key1);
+  BoundsCheckKey* k2 = static_cast<BoundsCheckKey*>(key2);
+  return k1->IndexBase() == k2->IndexBase() && k1->Length() == k2->Length();
+}
+
+
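+// Dictionary mapping a BoundsCheckKey to the head of the list of
+// BoundsCheckBbData entries for that key (a thin wrapper around
+// ZoneHashMap).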
+class BoundsCheckTable : private ZoneHashMap {
+ public:
+  BoundsCheckBbData** LookupOrInsert(BoundsCheckKey* key) {
+    return reinterpret_cast<BoundsCheckBbData**>(
+        &(Lookup(key, key->Hash(), true)->value));
+  }
+
+  void Insert(BoundsCheckKey* key, BoundsCheckBbData* data) {
+    Lookup(key, key->Hash(), true)->value = data;
+  }
+
+  void Delete(BoundsCheckKey* key) {
+    Remove(key, key->Hash());
+  }
+
+  BoundsCheckTable() : ZoneHashMap(BoundsCheckKeyMatch) { }
+};
+
+
+// Eliminates checks in bb and recursively in the dominated blocks.
+// Also replaces the results of check instructions with the original value, if
+// the result is used. This is safe now, since we don't do code motion after
+// this point. It enables better register allocation since the value produced
+// by check instructions is really a copy of the original value.
+void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
+                                            BoundsCheckTable* table) {
+  BoundsCheckBbData* bb_data_list = NULL;
+
+  for (HInstruction* i = bb->first(); i != NULL; i = i->next()) {
+    if (!i->IsBoundsCheck()) continue;
+
+    HBoundsCheck* check = HBoundsCheck::cast(i);
+    check->ReplaceAllUsesWith(check->index());
+
+    if (!FLAG_array_bounds_checks_elimination) continue;
+
+    int32_t offset;
+    BoundsCheckKey* key =
+        BoundsCheckKey::Create(bb->zone(), check, &offset);
+    BoundsCheckBbData** data_p = table->LookupOrInsert(key);
+    BoundsCheckBbData* data = *data_p;
+    if (data == NULL) {
+      bb_data_list = new(zone()) BoundsCheckBbData(key,
+                                                   offset,
+                                                   offset,
+                                                   bb,
+                                                   check,
+                                                   bb_data_list,
+                                                   NULL);
+      *data_p = bb_data_list;
+    } else if (data->OffsetIsCovered(offset)) {
+      check->DeleteAndReplaceWith(NULL);
+    } else if (data->BasicBlock() == bb) {
+      data->CoverCheck(check, offset);
+    } else {
+      int32_t new_lower_offset = offset < data->LowerOffset()
+          ? offset
+          : data->LowerOffset();
+      int32_t new_upper_offset = offset > data->UpperOffset()
+          ? offset
+          : data->UpperOffset();
+      bb_data_list = new(bb->zone()) BoundsCheckBbData(key,
+                                                       new_lower_offset,
+                                                       new_upper_offset,
+                                                       bb,
+                                                       check,
+                                                       bb_data_list,
+                                                       data);
+      table->Insert(key, bb_data_list);
+    }
+  }
+
+  for (int i = 0; i < bb->dominated_blocks()->length(); ++i) {
+    EliminateRedundantBoundsChecks(bb->dominated_blocks()->at(i), table);
+  }
+
+  for (BoundsCheckBbData* data = bb_data_list;
+       data != NULL;
+       data = data->NextInBasicBlock()) {
+    data->RemoveZeroOperations();
+    if (data->FatherInDominatorTree()) {
+      table->Insert(data->Key(), data->FatherInDominatorTree());
+    } else {
+      table->Delete(data->Key());
+    }
+  }
+}
+
+
+void HGraph::EliminateRedundantBoundsChecks() {
+  HPhase phase("H_Eliminate bounds checks", this);
+  AssertNoAllocation no_gc;
+  BoundsCheckTable checks_table;
+  EliminateRedundantBoundsChecks(entry_block(), &checks_table);
+}
+
+
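+// If the index of an array operation has the form "subexpression +/- int32
+// constant", replace the key with the subexpression and record the constant
+// on the operation as a static index offset instead. For example
+// (illustrative), a load keyed by "i + 4" becomes a load keyed by "i" with
+// an index offset of 4.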
+static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
+  HValue* index = array_operation->GetKey();
+
+  HConstant* constant;
+  HValue* subexpression;
+  int32_t sign;
+  if (index->IsAdd()) {
+    sign = 1;
+    HAdd* add = HAdd::cast(index);
+    if (add->left()->IsConstant()) {
+      subexpression = add->right();
+      constant = HConstant::cast(add->left());
+    } else if (add->right()->IsConstant()) {
+      subexpression = add->left();
+      constant = HConstant::cast(add->right());
+    } else {
+      return;
+    }
+  } else if (index->IsSub()) {
+    sign = -1;
+    HSub* sub = HSub::cast(index);
+    if (sub->left()->IsConstant()) {
+      subexpression = sub->right();
+      constant = HConstant::cast(sub->left());
+    } else if (sub->right()->IsConstant()) {
+      subexpression = sub->left();
+      constant = HConstant::cast(sub->right());
+    } else {
+      return;
+    }
+  } else {
+    return;
+  }
+
+  if (!constant->HasInteger32Value()) return;
+  int32_t value = constant->Integer32Value() * sign;
+  // We limit offset values to 30 bits because we want to avoid the risk of
+  // overflows when the offset is added to the object header size.
+  if (value >= 1 << 30 || value < 0) return;
+  array_operation->SetKey(subexpression);
+  if (index->HasNoUses()) {
+    index->DeleteAndReplaceWith(NULL);
+  }
+  ASSERT(value >= 0);
+  array_operation->SetIndexOffset(static_cast<uint32_t>(value));
+  array_operation->SetDehoisted(true);
+}
+
+
+void HGraph::DehoistSimpleArrayIndexComputations() {
+  if (!FLAG_array_index_dehoisting) return;
+
+  HPhase phase("H_Dehoist index computations", this);
   for (int i = 0; i < blocks()->length(); ++i) {
-    HInstruction* instr = blocks()->at(i)->first();
-    while (instr != NULL) {
-      if (instr->IsBoundsCheck()) {
-        // Replace all uses of the checked value with the original input.
-        ASSERT(instr->UseCount() > 0);
-        instr->ReplaceAllUsesWith(HBoundsCheck::cast(instr)->index());
+    for (HInstruction* instr = blocks()->at(i)->first();
+        instr != NULL;
+        instr = instr->next()) {
+      ArrayInstructionInterface* array_instruction = NULL;
+      if (instr->IsLoadKeyedFastElement()) {
+        HLoadKeyedFastElement* op = HLoadKeyedFastElement::cast(instr);
+        array_instruction = static_cast<ArrayInstructionInterface*>(op);
+      } else if (instr->IsLoadKeyedFastDoubleElement()) {
+        HLoadKeyedFastDoubleElement* op =
+            HLoadKeyedFastDoubleElement::cast(instr);
+        array_instruction = static_cast<ArrayInstructionInterface*>(op);
+      } else if (instr->IsLoadKeyedSpecializedArrayElement()) {
+        HLoadKeyedSpecializedArrayElement* op =
+            HLoadKeyedSpecializedArrayElement::cast(instr);
+        array_instruction = static_cast<ArrayInstructionInterface*>(op);
+      } else if (instr->IsStoreKeyedFastElement()) {
+        HStoreKeyedFastElement* op = HStoreKeyedFastElement::cast(instr);
+        array_instruction = static_cast<ArrayInstructionInterface*>(op);
+      } else if (instr->IsStoreKeyedFastDoubleElement()) {
+        HStoreKeyedFastDoubleElement* op =
+            HStoreKeyedFastDoubleElement::cast(instr);
+        array_instruction = static_cast<ArrayInstructionInterface*>(op);
+      } else if (instr->IsStoreKeyedSpecializedArrayElement()) {
+        HStoreKeyedSpecializedArrayElement* op =
+            HStoreKeyedSpecializedArrayElement::cast(instr);
+        array_instruction = static_cast<ArrayInstructionInterface*>(op);
+      } else {
+        continue;
       }
-      instr = instr->next();
+      DehoistArrayIndex(array_instruction);
     }
   }
 }
@@ -2697,7 +3277,7 @@ void HGraphBuilder::VisitBlock(Block* stmt) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  if (stmt->block_scope() != NULL) {
+  if (stmt->scope() != NULL) {
     return Bailout("ScopedBlock");
   }
   BreakAndContinueInfo break_info(stmt);
@@ -2851,10 +3431,10 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
     if (context->IsTest()) {
       TestContext* test = TestContext::cast(context);
       CHECK_ALIVE(VisitForEffect(stmt->expression()));
-      current_block()->Goto(test->if_true(), function_state()->drop_extra());
+      current_block()->Goto(test->if_true(), function_state());
     } else if (context->IsEffect()) {
       CHECK_ALIVE(VisitForEffect(stmt->expression()));
-      current_block()->Goto(function_return(), function_state()->drop_extra());
+      current_block()->Goto(function_return(), function_state());
     } else {
       ASSERT(context->IsValue());
       CHECK_ALIVE(VisitForValue(stmt->expression()));
@@ -2871,10 +3451,10 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
       current_block()->Finish(typecheck);
       if_spec_object->AddLeaveInlined(return_value,
                                       function_return(),
-                                      function_state()->drop_extra());
+                                      function_state());
       not_spec_object->AddLeaveInlined(receiver,
                                        function_return(),
-                                       function_state()->drop_extra());
+                                       function_state());
     }
   } else {
     // Return from an inlined function, visit the subexpression in the
@@ -2886,14 +3466,14 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
                       test->if_false());
     } else if (context->IsEffect()) {
       CHECK_ALIVE(VisitForEffect(stmt->expression()));
-      current_block()->Goto(function_return(), function_state()->drop_extra());
+      current_block()->Goto(function_return(), function_state());
     } else {
       ASSERT(context->IsValue());
       CHECK_ALIVE(VisitForValue(stmt->expression()));
       HValue* return_value = Pop();
       current_block()->AddLeaveInlined(return_value,
                                        function_return(),
-                                       function_state()->drop_extra());
+                                       function_state());
     }
   }
   set_current_block(NULL);
@@ -3603,7 +4183,6 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
       } else {
         HValue* context = environment()->LookupContext();
         HGlobalObject* global_object = new(zone()) HGlobalObject(context);
-        if (variable->is_qml_global()) global_object->set_qml_global(true);
         AddInstruction(global_object);
         HLoadGlobalGeneric* instr =
             new(zone()) HLoadGlobalGeneric(context,
@@ -3679,10 +4258,11 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
     if (boilerplate->HasFastDoubleElements()) {
       *total_size += FixedDoubleArray::SizeFor(elements->length());
     } else if (boilerplate->HasFastElements()) {
+      Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
       int length = elements->length();
       for (int i = 0; i < length; i++) {
         if ((*max_properties)-- == 0) return false;
-        Handle<Object> value = JSObject::GetElement(boilerplate, i);
+        Handle<Object> value(fast_elements->get(i));
         if (value->IsJSObject()) {
           Handle<JSObject> value_object = Handle<JSObject>::cast(value);
           if (!IsFastLiteral(value_object,
@@ -4231,7 +4811,6 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
   } else {
     HValue* context =  environment()->LookupContext();
     HGlobalObject* global_object = new(zone()) HGlobalObject(context);
-    if (var->is_qml_global()) global_object->set_qml_global(true);
     AddInstruction(global_object);
     HStoreGlobalGeneric* instr =
         new(zone()) HStoreGlobalGeneric(context,
@@ -4982,6 +5561,34 @@ HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
                          function_strict_mode_flag());
 }
 
+
+void HGraphBuilder::EnsureArgumentsArePushedForAccess() {
+  // Outermost function already has arguments on the stack.
+  if (function_state()->outer() == NULL) return;
+
+  if (function_state()->arguments_pushed()) return;
+
+  // Push the arguments when entering an inlined function.
+  HEnterInlined* entry = function_state()->entry();
+
+  ZoneList<HValue*>* arguments_values = entry->arguments_values();
+
+  HInstruction* insert_after = entry;
+  for (int i = 0; i < arguments_values->length(); i++) {
+    HValue* argument = arguments_values->at(i);
+    HInstruction* push_argument = new(zone()) HPushArgument(argument);
+    push_argument->InsertAfter(insert_after);
+    insert_after = push_argument;
+  }
+
+  HArgumentsElements* arguments_elements =
+      new(zone()) HArgumentsElements(true);
+  arguments_elements->ClearFlag(HValue::kUseGVN);
+  arguments_elements->InsertAfter(insert_after);
+  function_state()->set_arguments_elements(arguments_elements);
+}
+
+
 bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
   VariableProxy* proxy = expr->obj()->AsVariableProxy();
   if (proxy == NULL) return false;
@@ -4990,31 +5597,51 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
     return false;
   }
 
-  // Our implementation of arguments (based on this stack frame or an
-  // adapter below it) does not work for inlined functions.
-  if (function_state()->outer() != NULL) {
-    Bailout("arguments access in inlined function");
-    return true;
-  }
-
   HInstruction* result = NULL;
   if (expr->key()->IsPropertyName()) {
     Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
     if (!name->IsEqualTo(CStrVector("length"))) return false;
-    HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
-    result = new(zone()) HArgumentsLength(elements);
+
+    if (function_state()->outer() == NULL) {
+      HInstruction* elements = AddInstruction(
+          new(zone()) HArgumentsElements(false));
+      result = new(zone()) HArgumentsLength(elements);
+    } else {
+      // Number of arguments without receiver.
+      int argument_count = environment()->
+          arguments_environment()->parameter_count() - 1;
+      result = new(zone()) HConstant(
+        Handle<Object>(Smi::FromInt(argument_count)),
+        Representation::Integer32());
+    }
   } else {
     Push(graph()->GetArgumentsObject());
     VisitForValue(expr->key());
     if (HasStackOverflow() || current_block() == NULL) return true;
     HValue* key = Pop();
     Drop(1);  // Arguments object.
-    HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
-    HInstruction* length = AddInstruction(
-        new(zone()) HArgumentsLength(elements));
-    HInstruction* checked_key =
-        AddInstruction(new(zone()) HBoundsCheck(key, length));
-    result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
+    if (function_state()->outer() == NULL) {
+      HInstruction* elements = AddInstruction(
+          new(zone()) HArgumentsElements(false));
+      HInstruction* length = AddInstruction(
+          new(zone()) HArgumentsLength(elements));
+      HInstruction* checked_key =
+          AddInstruction(new(zone()) HBoundsCheck(key, length));
+      result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
+    } else {
+      EnsureArgumentsArePushedForAccess();
+
+      // Number of arguments without receiver.
+      HInstruction* elements = function_state()->arguments_elements();
+      int argument_count = environment()->
+          arguments_environment()->parameter_count() - 1;
+      HInstruction* length = AddInstruction(new(zone()) HConstant(
+        Handle<Object>(Smi::FromInt(argument_count)),
+        Representation::Integer32()));
+      HInstruction* checked_key =
+          AddInstruction(new(zone()) HBoundsCheck(key, length));
+      result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
+    }
   }
   ast_context()->ReturnInstruction(result, expr->id());
   return true;
@@ -5120,6 +5747,39 @@ void HGraphBuilder::AddCheckConstantFunction(Call* expr,
 }
 
 
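+// Helper used to order polymorphic call targets by "hotness" before
+// attempting to inline them.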
+class FunctionSorter {
+ public:
+  FunctionSorter() : index_(0), ticks_(0), ast_length_(0), src_length_(0) { }
+  FunctionSorter(int index, int ticks, int ast_length, int src_length)
+      : index_(index),
+        ticks_(ticks),
+        ast_length_(ast_length),
+        src_length_(src_length) { }
+
+  int index() const { return index_; }
+  int ticks() const { return ticks_; }
+  int ast_length() const { return ast_length_; }
+  int src_length() const { return src_length_; }
+
+ private:
+  int index_;
+  int ticks_;
+  int ast_length_;
+  int src_length_;
+};
+
+
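+// qsort comparator: functions with more profiler ticks sort first; ties are
+// broken by smaller AST size, then by smaller source size.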
+static int CompareHotness(void const* a, void const* b) {
+  FunctionSorter const* function1 = reinterpret_cast<FunctionSorter const*>(a);
+  FunctionSorter const* function2 = reinterpret_cast<FunctionSorter const*>(b);
+  int diff = function1->ticks() - function2->ticks();
+  if (diff != 0) return -diff;
+  diff = function1->ast_length() - function2->ast_length();
+  if (diff != 0) return diff;
+  return function1->src_length() - function2->src_length();
+}
+
+
 void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
                                                HValue* receiver,
                                                SmallMapList* types,
@@ -5128,51 +5788,73 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
   // maps are identical. In that case we can avoid repeatedly generating the
   // same prototype map checks.
   int argument_count = expr->arguments()->length() + 1;  // Includes receiver.
-  int count = 0;
   HBasicBlock* join = NULL;
-  for (int i = 0; i < types->length() && count < kMaxCallPolymorphism; ++i) {
+  FunctionSorter order[kMaxCallPolymorphism];
+  int ordered_functions = 0;
+  for (int i = 0;
+       i < types->length() && ordered_functions < kMaxCallPolymorphism;
+       ++i) {
     Handle<Map> map = types->at(i);
     if (expr->ComputeTarget(map, name)) {
-      if (count == 0) {
-        // Only needed once.
-        AddInstruction(new(zone()) HCheckNonSmi(receiver));
-        join = graph()->CreateBasicBlock();
-      }
-      ++count;
-      HBasicBlock* if_true = graph()->CreateBasicBlock();
-      HBasicBlock* if_false = graph()->CreateBasicBlock();
-      HCompareMap* compare =
-          new(zone()) HCompareMap(receiver, map, if_true, if_false);
-      current_block()->Finish(compare);
+      order[ordered_functions++] =
+          FunctionSorter(i,
+                         expr->target()->shared()->profiler_ticks(),
+                         InliningAstSize(expr->target()),
+                         expr->target()->shared()->SourceSize());
+    }
+  }
 
-      set_current_block(if_true);
-      AddCheckConstantFunction(expr, receiver, map, false);
-      if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
-        PrintF("Trying to inline the polymorphic call to %s\n",
-               *name->ToCString());
-      }
-      if (FLAG_polymorphic_inlining && TryInlineCall(expr)) {
-        // Trying to inline will signal that we should bailout from the
-        // entire compilation by setting stack overflow on the visitor.
-        if (HasStackOverflow()) return;
-      } else {
-        HCallConstantFunction* call =
-            new(zone()) HCallConstantFunction(expr->target(), argument_count);
-        call->set_position(expr->position());
-        PreProcessCall(call);
-        AddInstruction(call);
-        if (!ast_context()->IsEffect()) Push(call);
-      }
+  qsort(reinterpret_cast<void*>(&order[0]),
+        ordered_functions,
+        sizeof(order[0]),
+        &CompareHotness);
 
-      if (current_block() != NULL) current_block()->Goto(join);
-      set_current_block(if_false);
+  for (int fn = 0; fn < ordered_functions; ++fn) {
+    int i = order[fn].index();
+    Handle<Map> map = types->at(i);
+    if (fn == 0) {
+      // Only needed once.
+      AddInstruction(new(zone()) HCheckNonSmi(receiver));
+      join = graph()->CreateBasicBlock();
+    }
+    HBasicBlock* if_true = graph()->CreateBasicBlock();
+    HBasicBlock* if_false = graph()->CreateBasicBlock();
+    HCompareMap* compare =
+        new(zone()) HCompareMap(receiver, map, if_true, if_false);
+    current_block()->Finish(compare);
+
+    set_current_block(if_true);
+    expr->ComputeTarget(map, name);
+    AddCheckConstantFunction(expr, receiver, map, false);
+    if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
+      Handle<JSFunction> caller = info()->closure();
+      SmartArrayPointer<char> caller_name =
+          caller->shared()->DebugName()->ToCString();
+      PrintF("Trying to inline the polymorphic call to %s from %s\n",
+             *name->ToCString(),
+             *caller_name);
+    }
+    if (FLAG_polymorphic_inlining && TryInlineCall(expr)) {
+      // Trying to inline will signal that we should bailout from the
+      // entire compilation by setting stack overflow on the visitor.
+      if (HasStackOverflow()) return;
+    } else {
+      HCallConstantFunction* call =
+          new(zone()) HCallConstantFunction(expr->target(), argument_count);
+      call->set_position(expr->position());
+      PreProcessCall(call);
+      AddInstruction(call);
+      if (!ast_context()->IsEffect()) Push(call);
     }
+
+    if (current_block() != NULL) current_block()->Goto(join);
+    set_current_block(if_false);
   }
 
   // Finish up.  Unconditionally deoptimize if we've handled all the maps we
   // know about and do not want to handle ones we've never seen.  Otherwise
   // use a generic IC.
-  if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
+  if (ordered_functions == types->length() && FLAG_deoptimize_uncommon_cases) {
     current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses);
   } else {
     HValue* context = environment()->LookupContext();
@@ -5221,14 +5903,11 @@ void HGraphBuilder::TraceInline(Handle<JSFunction> target,
 }
 
 
-bool HGraphBuilder::TryInline(CallKind call_kind,
-                              Handle<JSFunction> target,
-                              ZoneList<Expression*>* arguments,
-                              HValue* receiver,
-                              int ast_id,
-                              int return_id,
-                              ReturnHandlingFlag return_handling) {
-  if (!FLAG_use_inlining) return false;
+static const int kNotInlinable = 1000000000;
+
+
+int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
+  if (!FLAG_use_inlining) return kNotInlinable;
 
   // Precondition: call is monomorphic and we have found a target with the
   // appropriate arity.
@@ -5237,29 +5916,46 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
 
   // Do a quick check on source code length to avoid parsing large
   // inlining candidates.
-  if ((FLAG_limit_inlining && target_shared->SourceSize() > kMaxSourceSize)
-      || target_shared->SourceSize() > kUnlimitedMaxSourceSize) {
+  if (target_shared->SourceSize() >
+      Min(FLAG_max_inlined_source_size, kUnlimitedMaxInlinedSourceSize)) {
     TraceInline(target, caller, "target text too big");
-    return false;
+    return kNotInlinable;
   }
 
   // Target must be inlineable.
   if (!target->IsInlineable()) {
     TraceInline(target, caller, "target not inlineable");
-    return false;
+    return kNotInlinable;
   }
   if (target_shared->dont_inline() || target_shared->dont_optimize()) {
     TraceInline(target, caller, "target contains unsupported syntax [early]");
-    return false;
+    return kNotInlinable;
   }
 
   int nodes_added = target_shared->ast_node_count();
-  if ((FLAG_limit_inlining && nodes_added > kMaxInlinedSize) ||
-      nodes_added > kUnlimitedMaxInlinedSize) {
+  return nodes_added;
+}
+
+
+bool HGraphBuilder::TryInline(CallKind call_kind,
+                              Handle<JSFunction> target,
+                              ZoneList<Expression*>* arguments,
+                              HValue* receiver,
+                              int ast_id,
+                              int return_id,
+                              ReturnHandlingFlag return_handling) {
+  int nodes_added = InliningAstSize(target);
+  if (nodes_added == kNotInlinable) return false;
+
+  Handle<JSFunction> caller = info()->closure();
+
+  if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
     TraceInline(target, caller, "target AST is too large [early]");
     return false;
   }
 
+  Handle<SharedFunctionInfo> target_shared(target->shared());
+
 #if !defined(V8_TARGET_ARCH_IA32)
   // Target must be able to use caller's context.
   CompilationInfo* outer_info = info();
@@ -5297,8 +5993,8 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
   }
 
   // We don't want to add more than a certain number of nodes from inlining.
-  if ((FLAG_limit_inlining && inlined_count_ > kMaxInlinedNodes) ||
-      inlined_count_ > kUnlimitedMaxInlinedNodes) {
+  if (inlined_count_ > Min(FLAG_max_inlined_nodes_cumulative,
+                           kUnlimitedMaxInlinedNodesCumulative)) {
     TraceInline(target, caller, "cumulative AST node limit reached");
     return false;
   }
@@ -5325,8 +6021,7 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
   // The following conditions must be checked again after re-parsing, because
   // earlier the information might not have been complete due to lazy parsing.
   nodes_added = function->ast_node_count();
-  if ((FLAG_limit_inlining && nodes_added > kMaxInlinedSize) ||
-      nodes_added > kUnlimitedMaxInlinedSize) {
+  if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
     TraceInline(target, caller, "target AST is too large [late]");
     return false;
   }
@@ -5421,20 +6116,42 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
   AddInstruction(context);
   inner_env->BindContext(context);
 #endif
+
   AddSimulate(return_id);
   current_block()->UpdateEnvironment(inner_env);
-  AddInstruction(new(zone()) HEnterInlined(target,
-                                           arguments->length(),
-                                           function,
-                                           call_kind,
-                                           function_state()->is_construct(),
-                                           function->scope()->arguments()));
+
+  ZoneList<HValue*>* arguments_values = NULL;
+
+  // If the function uses arguments, copy the current argument values
+  // to use them for materialization.
+  if (function->scope()->arguments() != NULL) {
+    HEnvironment* arguments_env = inner_env->arguments_environment();
+    int arguments_count = arguments_env->parameter_count();
+    arguments_values = new(zone()) ZoneList<HValue*>(arguments_count);
+    for (int i = 0; i < arguments_count; i++) {
+      arguments_values->Add(arguments_env->Lookup(i));
+    }
+  }
+
+  HEnterInlined* enter_inlined =
+      new(zone()) HEnterInlined(target,
+                                arguments->length(),
+                                function,
+                                call_kind,
+                                function_state()->is_construct(),
+                                function->scope()->arguments(),
+                                arguments_values);
+  function_state()->set_entry(enter_inlined);
+  AddInstruction(enter_inlined);
+
   // If the function uses arguments object create and bind one.
   if (function->scope()->arguments() != NULL) {
     ASSERT(function->scope()->arguments()->IsStackAllocated());
-    environment()->Bind(function->scope()->arguments(),
-                        graph()->GetArgumentsObject());
+    inner_env->Bind(function->scope()->arguments(),
+                    graph()->GetArgumentsObject());
   }
+
   VisitDeclarations(target_info.scope()->declarations());
   VisitStatements(function->body());
   if (HasStackOverflow()) {
@@ -5463,17 +6180,17 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
           : undefined;
       current_block()->AddLeaveInlined(return_value,
                                        function_return(),
-                                       function_state()->drop_extra());
+                                       function_state());
     } else if (call_context()->IsEffect()) {
       ASSERT(function_return() != NULL);
-      current_block()->Goto(function_return(), function_state()->drop_extra());
+      current_block()->Goto(function_return(), function_state());
     } else {
       ASSERT(call_context()->IsTest());
       ASSERT(inlined_test_context() != NULL);
       HBasicBlock* target = function_state()->is_construct()
           ? inlined_test_context()->if_true()
           : inlined_test_context()->if_false();
-      current_block()->Goto(target, function_state()->drop_extra());
+      current_block()->Goto(target, function_state());
     }
   }
 
@@ -5491,12 +6208,12 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
     if (if_true->HasPredecessor()) {
       if_true->SetJoinId(ast_id);
       HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
-      if_true->Goto(true_target, function_state()->drop_extra());
+      if_true->Goto(true_target, function_state());
     }
     if (if_false->HasPredecessor()) {
       if_false->SetJoinId(ast_id);
       HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
-      if_false->Goto(false_target, function_state()->drop_extra());
+      if_false->Goto(false_target, function_state());
     }
     set_current_block(NULL);
     return true;
@@ -5805,7 +6522,8 @@ bool HGraphBuilder::TryCallApply(Call* expr) {
   HValue* receiver = Pop();
 
   if (function_state()->outer() == NULL) {
-    HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
+    HInstruction* elements = AddInstruction(
+        new(zone()) HArgumentsElements(false));
     HInstruction* length =
         AddInstruction(new(zone()) HArgumentsLength(elements));
     HValue* wrapped_receiver =
@@ -5938,6 +6656,10 @@ void HGraphBuilder::VisitCall(Call* expr) {
     VariableProxy* proxy = expr->expression()->AsVariableProxy();
     bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
 
+    if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+      return Bailout("possible direct call to eval");
+    }
+
     if (global_call) {
       Variable* var = proxy->var();
       bool known_global_function = false;
@@ -5987,13 +6709,11 @@ void HGraphBuilder::VisitCall(Call* expr) {
       } else {
         HValue* context = environment()->LookupContext();
         HGlobalObject* receiver = new(zone()) HGlobalObject(context);
-        if (var->is_qml_global()) receiver->set_qml_global(true);
         AddInstruction(receiver);
         PushAndAdd(new(zone()) HPushArgument(receiver));
         CHECK_ALIVE(VisitArgumentList(expr->arguments()));
 
         call = new(zone()) HCallGlobal(context, var->name(), argument_count);
-        if (var->is_qml_global()) static_cast<HCallGlobal*>(call)->set_qml_global(true);
         Drop(argument_count);
       }
 
@@ -6022,9 +6742,11 @@ void HGraphBuilder::VisitCall(Call* expr) {
       if (TryInlineCall(expr, true)) {   // Drop function from environment.
         return;
       } else {
-        call = PreProcessCall(new(zone()) HInvokeFunction(context,
-                                                          function,
-                                                          argument_count));
+        call = PreProcessCall(
+            new(zone()) HInvokeFunction(context,
+                                        function,
+                                        expr->target(),
+                                        argument_count));
         Drop(1);  // The function.
       }
 
@@ -7019,91 +7741,50 @@ void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
 
 
 void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
-  int length = declarations->length();
-  int global_count = 0;
-  for (int i = 0; i < declarations->length(); i++) {
-    Declaration* decl = declarations->at(i);
-    FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration();
-    HandleDeclaration(decl->proxy(),
-                      decl->mode(),
-                      fun_decl != NULL ? fun_decl->fun() : NULL,
-                      &global_count);
-  }
-
-  // Batch declare global functions and variables.
-  if (global_count > 0) {
+  ASSERT(globals_.is_empty());
+  AstVisitor::VisitDeclarations(declarations);
+  if (!globals_.is_empty()) {
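+    // The declaration visitors below have added (name, initial value) pairs
+    // to globals_; flush them with a single HDeclareGlobals instruction.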
     Handle<FixedArray> array =
-        isolate()->factory()->NewFixedArray(3 * global_count, TENURED);
-    for (int j = 0, i = 0; i < length; i++) {
-      Declaration* decl = declarations->at(i);
-      Variable* var = decl->proxy()->var();
-
-      if (var->IsUnallocated()) {
-        array->set(j++, *(var->name()));
-        FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration();
-        if (fun_decl == NULL) {
-          if (var->binding_needs_init()) {
-            // In case this binding needs initialization use the hole.
-            array->set_the_hole(j++);
-          } else {
-            array->set_undefined(j++);
-          }
-        } else {
-          Handle<SharedFunctionInfo> function =
-              Compiler::BuildFunctionInfo(fun_decl->fun(), info()->script());
-          // Check for stack-overflow exception.
-          if (function.is_null()) {
-            SetStackOverflow();
-            return;
-          }
-          array->set(j++, *function);
-        }
-      }
-      array->set(j++, Smi::FromInt(var->is_qml_global()));
-    }
+       isolate()->factory()->NewFixedArray(globals_.length(), TENURED);
+    for (int i = 0; i < globals_.length(); ++i) array->set(i, *globals_.at(i));
     int flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
                 DeclareGlobalsNativeFlag::encode(info()->is_native()) |
                 DeclareGlobalsLanguageMode::encode(info()->language_mode());
-    HInstruction* result =
-        new(zone()) HDeclareGlobals(environment()->LookupContext(),
-                                    array,
-                                    flags);
+    HInstruction* result = new(zone()) HDeclareGlobals(
+        environment()->LookupContext(), array, flags);
     AddInstruction(result);
+    globals_.Clear();
   }
 }
 
 
-void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
-                                      VariableMode mode,
-                                      FunctionLiteral* function,
-                                      int* global_count) {
-  Variable* var = proxy->var();
-  bool binding_needs_init =
-      (mode == CONST || mode == CONST_HARMONY || mode == LET);
-  switch (var->location()) {
+void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  VariableMode mode = declaration->mode();
+  Variable* variable = proxy->var();
+  bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+  switch (variable->location()) {
     case Variable::UNALLOCATED:
-      ++(*global_count);
+      globals_.Add(variable->name());
+      globals_.Add(variable->binding_needs_init()
+                       ? isolate()->factory()->the_hole_value()
+                       : isolate()->factory()->undefined_value());
       return;
     case Variable::PARAMETER:
     case Variable::LOCAL:
+      if (hole_init) {
+        HValue* value = graph()->GetConstantHole();
+        environment()->Bind(variable, value);
+      }
+      break;
     case Variable::CONTEXT:
-      if (binding_needs_init || function != NULL) {
-        HValue* value = NULL;
-        if (function != NULL) {
-          CHECK_ALIVE(VisitForValue(function));
-          value = Pop();
-        } else {
-          value = graph()->GetConstantHole();
-        }
-        if (var->IsContextSlot()) {
-          HValue* context = environment()->LookupContext();
-          HStoreContextSlot* store = new HStoreContextSlot(
-              context, var->index(), HStoreContextSlot::kNoCheck, value);
-          AddInstruction(store);
-          if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
-        } else {
-          environment()->Bind(var, value);
-        }
+      if (hole_init) {
+        HValue* value = graph()->GetConstantHole();
+        HValue* context = environment()->LookupContext();
+        HStoreContextSlot* store = new HStoreContextSlot(
+            context, variable->index(), HStoreContextSlot::kNoCheck, value);
+        AddInstruction(store);
+        if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
       }
       break;
     case Variable::LOOKUP:
@@ -7112,48 +7793,74 @@ void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
 }
 
 
-void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
-  UNREACHABLE();
-}
-
-
-void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
-  UNREACHABLE();
+void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      globals_.Add(variable->name());
+      Handle<SharedFunctionInfo> function =
+          Compiler::BuildFunctionInfo(declaration->fun(), info()->script());
+      // Check for stack-overflow exception.
+      if (function.is_null()) return SetStackOverflow();
+      globals_.Add(function);
+      return;
+    }
+    case Variable::PARAMETER:
+    case Variable::LOCAL: {
+      CHECK_ALIVE(VisitForValue(declaration->fun()));
+      HValue* value = Pop();
+      environment()->Bind(variable, value);
+      break;
+    }
+    case Variable::CONTEXT: {
+      CHECK_ALIVE(VisitForValue(declaration->fun()));
+      HValue* value = Pop();
+      HValue* context = environment()->LookupContext();
+      HStoreContextSlot* store = new HStoreContextSlot(
+          context, variable->index(), HStoreContextSlot::kNoCheck, value);
+      AddInstruction(store);
+      if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
+      break;
+    }
+    case Variable::LOOKUP:
+      return Bailout("unsupported lookup slot in declaration");
+  }
 }
 
 
-void HGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* decl) {
+void HGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* declaration) {
   UNREACHABLE();
 }
 
 
-void HGraphBuilder::VisitImportDeclaration(ImportDeclaration* decl) {
+void HGraphBuilder::VisitImportDeclaration(ImportDeclaration* declaration) {
   UNREACHABLE();
 }
 
 
-void HGraphBuilder::VisitExportDeclaration(ExportDeclaration* decl) {
+void HGraphBuilder::VisitExportDeclaration(ExportDeclaration* declaration) {
   UNREACHABLE();
 }
 
 
 void HGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
-  // TODO(rossberg)
+  UNREACHABLE();
 }
 
 
 void HGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
-  // TODO(rossberg)
+  UNREACHABLE();
 }
 
 
 void HGraphBuilder::VisitModulePath(ModulePath* module) {
-  // TODO(rossberg)
+  UNREACHABLE();
 }
 
 
 void HGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
-  // TODO(rossberg)
+  UNREACHABLE();
 }
 
 
@@ -7274,7 +7981,8 @@ void HGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
   // function is blacklisted by AstNode::IsInlineable.
   ASSERT(function_state()->outer() == NULL);
   ASSERT(call->arguments()->length() == 0);
-  HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
+  HInstruction* elements = AddInstruction(
+      new(zone()) HArgumentsElements(false));
   HArgumentsLength* result = new(zone()) HArgumentsLength(elements);
   return ast_context()->ReturnInstruction(result, call->id());
 }
@@ -7288,7 +7996,8 @@ void HGraphBuilder::GenerateArguments(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* index = Pop();
-  HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
+  HInstruction* elements = AddInstruction(
+      new(zone()) HArgumentsElements(false));
   HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
   HAccessArgumentsAt* result =
       new(zone()) HAccessArgumentsAt(elements, length, index);
@@ -7511,14 +8220,6 @@ void HGraphBuilder::GenerateNumberToString(CallRuntime* call) {
 }
 
 
-// Fast swapping of elements. Takes three expressions, the object and two
-// indices. This should only be used if the indices are known to be
-// non-negative and within bounds of the elements array at the call site.
-void HGraphBuilder::GenerateSwapElements(CallRuntime* call) {
-  return Bailout("inlined runtime function: SwapElements");
-}
-
-
 // Fast call for custom callbacks.
 void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
   // 1 ~ The function to call is not itself an argument to the call.
index bc9bc9d..b56a5af 100644 (file)
@@ -42,6 +42,7 @@ namespace internal {
 
 // Forward declarations.
 class BitVector;
+class FunctionState;
 class HEnvironment;
 class HGraph;
 class HLoopInformation;
@@ -121,7 +122,7 @@ class HBasicBlock: public ZoneObject {
 
   void Finish(HControlInstruction* last);
   void FinishExit(HControlInstruction* instruction);
-  void Goto(HBasicBlock* block, bool drop_extra = false);
+  void Goto(HBasicBlock* block, FunctionState* state = NULL);
 
   int PredecessorIndexOf(HBasicBlock* predecessor) const;
   void AddSimulate(int ast_id) { AddInstruction(CreateSimulate(ast_id)); }
@@ -136,7 +137,7 @@ class HBasicBlock: public ZoneObject {
   // instruction and updating the bailout environment.
   void AddLeaveInlined(HValue* return_value,
                        HBasicBlock* target,
-                       bool drop_extra = false);
+                       FunctionState* state = NULL);
 
   // If a target block is tagged as an inline function return, all
   // predecessors should contain the inlined exit sequence:
@@ -240,7 +241,7 @@ class HLoopInformation: public ZoneObject {
   HStackCheck* stack_check_;
 };
 
-
+class BoundsCheckTable;
 class HGraph: public ZoneObject {
  public:
   explicit HGraph(CompilationInfo* info);
@@ -265,6 +266,8 @@ class HGraph: public ZoneObject {
   void OrderBlocks();
   void AssignDominators();
   void ReplaceCheckedValues();
+  void EliminateRedundantBoundsChecks();
+  void DehoistSimpleArrayIndexComputations();
   void PropagateDeoptimizingMark();
 
   // Returns false if there are phi-uses of the arguments-object
@@ -357,6 +360,7 @@ class HGraph: public ZoneObject {
   void InferTypes(ZoneList<HValue*>* worklist);
   void InitializeInferredTypes(int from_inclusive, int to_inclusive);
   void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
+  void EliminateRedundantBoundsChecks(HBasicBlock* bb, BoundsCheckTable* table);
 
   Isolate* isolate_;
   int next_block_id_;
@@ -715,6 +719,16 @@ class FunctionState {
 
   FunctionState* outer() { return outer_; }
 
+  HEnterInlined* entry() { return entry_; }
+  void set_entry(HEnterInlined* entry) { entry_ = entry; }
+
+  HArgumentsElements* arguments_elements() { return arguments_elements_; }
+  void set_arguments_elements(HArgumentsElements* arguments_elements) {
+    arguments_elements_ = arguments_elements;
+  }
+
+  bool arguments_pushed() { return arguments_elements() != NULL; }
+
  private:
   HGraphBuilder* owner_;
 
@@ -741,6 +755,12 @@ class FunctionState {
   // return blocks.  NULL in all other cases.
   TestContext* test_context_;
 
+  // When inlining, the HEnterInlined instruction corresponding to the
+  // function entry.
+  HEnterInlined* entry_;
+
+  HArgumentsElements* arguments_elements_;
+
   FunctionState* outer_;
 };
 
@@ -851,15 +871,11 @@ class HGraphBuilder: public AstVisitor {
   static const int kMaxLoadPolymorphism = 4;
   static const int kMaxStorePolymorphism = 4;
 
-  static const int kMaxInlinedNodes = 196;
-  static const int kMaxInlinedSize = 196;
-  static const int kMaxSourceSize = 600;
-
   // Even in the 'unlimited' case we have to have some limit in order not to
   // overflow the stack.
-  static const int kUnlimitedMaxInlinedNodes = 1000;
-  static const int kUnlimitedMaxInlinedSize = 1000;
-  static const int kUnlimitedMaxSourceSize = 600;
+  static const int kUnlimitedMaxInlinedSourceSize = 100000;
+  static const int kUnlimitedMaxInlinedNodes = 10000;
+  static const int kUnlimitedMaxInlinedNodesCumulative = 10000;
 
   // Simple accessors.
   void set_function_state(FunctionState* state) { function_state_ = state; }
@@ -896,11 +912,6 @@ class HGraphBuilder: public AstVisitor {
   INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
 #undef INLINE_FUNCTION_GENERATOR_DECLARATION
 
-  void HandleDeclaration(VariableProxy* proxy,
-                         VariableMode mode,
-                         FunctionLiteral* function,
-                         int* global_count);
-
   void VisitDelete(UnaryOperation* expr);
   void VisitVoid(UnaryOperation* expr);
   void VisitTypeof(UnaryOperation* expr);
@@ -994,11 +1005,13 @@ class HGraphBuilder: public AstVisitor {
                                             LookupResult* lookup,
                                             bool is_store);
 
+  void EnsureArgumentsArePushedForAccess();
   bool TryArgumentsAccess(Property* expr);
 
   // Try to optimize fun.apply(receiver, arguments) pattern.
   bool TryCallApply(Call* expr);
 
+  int InliningAstSize(Handle<JSFunction> target);
   bool TryInline(CallKind call_kind,
                  Handle<JSFunction> target,
                  ZoneList<Expression*>* arguments,
@@ -1149,6 +1162,7 @@ class HGraphBuilder: public AstVisitor {
   HBasicBlock* current_block_;
 
   int inlined_count_;
+  ZoneList<Handle<Object> > globals_;
 
   Zone* zone_;
 
@@ -1223,6 +1237,30 @@ class HValueMap: public ZoneObject {
 };
 
 
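+// Remembers, for each tracked kind of side effect, the last instruction
+// recorded as causing it; Kill() clears the entries for the given flags.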
+class HSideEffectMap BASE_EMBEDDED {
+ public:
+  HSideEffectMap();
+  explicit HSideEffectMap(HSideEffectMap* other);
+
+  void Kill(GVNFlagSet flags);
+
+  void Store(GVNFlagSet flags, HInstruction* instr);
+
+  bool IsEmpty() const { return count_ == 0; }
+
+  inline HInstruction* operator[](int i) const {
+    ASSERT(0 <= i);
+    ASSERT(i < kNumberOfTrackedSideEffects);
+    return data_[i];
+  }
+  inline HInstruction* at(int i) const { return operator[](i); }
+
+ private:
+  int count_;
+  HInstruction* data_[kNumberOfTrackedSideEffects];
+};
+
+
 class HStatistics: public Malloced {
  public:
   void Initialize(CompilationInfo* info);
index 929b485..4ead80b 100644 (file)
@@ -640,6 +640,9 @@ class Assembler : public AssemblerBase {
   static const byte kJccShortPrefix = 0x70;
   static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
   static const byte kJcShortOpcode = kJccShortPrefix | carry;
+  static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
+  static const byte kJzShortOpcode = kJccShortPrefix | zero;
+
 
   // ---------------------------------------------------------------------------
   // Code generation
index a5d42cf..a36763d 100644 (file)
@@ -831,7 +831,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
 
     // Copy all arguments from the array to the stack.
     Label entry, loop;
-    __ mov(eax, Operand(ebp, kIndexOffset));
+    __ mov(ecx, Operand(ebp, kIndexOffset));
     __ jmp(&entry);
     __ bind(&loop);
     __ mov(edx, Operand(ebp, kArgumentsOffset));  // load arguments
@@ -848,16 +848,17 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
     __ push(eax);
 
-    // Update the index on the stack and in register eax.
+    // Update the index on the stack and in register ecx.
-    __ mov(eax, Operand(ebp, kIndexOffset));
-    __ add(eax, Immediate(1 << kSmiTagSize));
-    __ mov(Operand(ebp, kIndexOffset), eax);
+    __ mov(ecx, Operand(ebp, kIndexOffset));
+    __ add(ecx, Immediate(1 << kSmiTagSize));
+    __ mov(Operand(ebp, kIndexOffset), ecx);
 
     __ bind(&entry);
-    __ cmp(eax, Operand(ebp, kLimitOffset));
+    __ cmp(ecx, Operand(ebp, kLimitOffset));
     __ j(not_equal, &loop);
 
     // Invoke the function.
     Label call_proxy;
+    __ mov(eax, ecx);
     ParameterCount actual(eax);
     __ SmiUntag(eax);
     __ mov(edi, Operand(ebp, kFunctionOffset));
index 2568dae..a1c6edd 100644 (file)
@@ -145,11 +145,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
   __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);
 
-  // Copy the qml global object from the previous context.
-  __ mov(ebx, Operand(esi, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
-  __ mov(Operand(eax, Context::SlotOffset(Context::QML_GLOBAL_INDEX)), ebx);
-
-
   // Initialize the rest of the slots to undefined.
   __ mov(ebx, factory->undefined_value());
   for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
@@ -216,10 +211,6 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
   __ mov(ebx, ContextOperand(esi, Context::GLOBAL_INDEX));
   __ mov(ContextOperand(eax, Context::GLOBAL_INDEX), ebx);
 
-  // Copy the qml global object from the previous context.
-  __ mov(ebx, ContextOperand(esi, Context::QML_GLOBAL_INDEX));
-  __ mov(ContextOperand(eax, Context::QML_GLOBAL_INDEX), ebx);
-
   // Initialize the rest of the slots to the hole value.
   if (slots_ == 1) {
     __ mov(ContextOperand(eax, Context::MIN_CONTEXT_SLOTS),
@@ -1690,6 +1681,11 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
 }
 
 
+// Input:
+//    edx: left operand (tagged)
+//    eax: right operand (tagged)
+// Output:
+//    eax: result (tagged)
 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
   Label call_runtime;
   ASSERT(operands_type_ == BinaryOpIC::INT32);
@@ -1699,31 +1695,37 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
     case Token::ADD:
     case Token::SUB:
     case Token::MUL:
-    case Token::DIV: {
+    case Token::DIV:
+    case Token::MOD: {
       Label not_floats;
       Label not_int32;
       if (CpuFeatures::IsSupported(SSE2)) {
         CpuFeatures::Scope use_sse2(SSE2);
         FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
         FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
-        switch (op_) {
-          case Token::ADD: __ addsd(xmm0, xmm1); break;
-          case Token::SUB: __ subsd(xmm0, xmm1); break;
-          case Token::MUL: __ mulsd(xmm0, xmm1); break;
-          case Token::DIV: __ divsd(xmm0, xmm1); break;
-          default: UNREACHABLE();
-        }
-        // Check result type if it is currently Int32.
-        if (result_type_ <= BinaryOpIC::INT32) {
-          __ cvttsd2si(ecx, Operand(xmm0));
-          __ cvtsi2sd(xmm2, ecx);
-          __ ucomisd(xmm0, xmm2);
-          __ j(not_zero, &not_int32);
-          __ j(carry, &not_int32);
+        if (op_ == Token::MOD) {
+          GenerateRegisterArgsPush(masm);
+          __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+        } else {
+          switch (op_) {
+            case Token::ADD: __ addsd(xmm0, xmm1); break;
+            case Token::SUB: __ subsd(xmm0, xmm1); break;
+            case Token::MUL: __ mulsd(xmm0, xmm1); break;
+            case Token::DIV: __ divsd(xmm0, xmm1); break;
+            default: UNREACHABLE();
+          }
+          // Check result type if it is currently Int32.
+          if (result_type_ <= BinaryOpIC::INT32) {
+            __ cvttsd2si(ecx, Operand(xmm0));
+            __ cvtsi2sd(xmm2, ecx);
+            __ ucomisd(xmm0, xmm2);
+            __ j(not_zero, &not_int32);
+            __ j(carry, &not_int32);
+          }
+          GenerateHeapResultAllocation(masm, &call_runtime);
+          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+          __ ret(0);
         }
-        GenerateHeapResultAllocation(masm, &call_runtime);
-        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-        __ ret(0);
       } else {  // SSE2 not available, use FPU.
         FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
         FloatingPointHelper::LoadFloatOperands(
@@ -1731,20 +1733,28 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
             ecx,
             FloatingPointHelper::ARGS_IN_REGISTERS);
         FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
-        switch (op_) {
-          case Token::ADD: __ faddp(1); break;
-          case Token::SUB: __ fsubp(1); break;
-          case Token::MUL: __ fmulp(1); break;
-          case Token::DIV: __ fdivp(1); break;
-          default: UNREACHABLE();
+        if (op_ == Token::MOD) {
+          // The operands are now on the FPU stack, but we don't need them.
+          __ fstp(0);
+          __ fstp(0);
+          GenerateRegisterArgsPush(masm);
+          __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+        } else {
+          switch (op_) {
+            case Token::ADD: __ faddp(1); break;
+            case Token::SUB: __ fsubp(1); break;
+            case Token::MUL: __ fmulp(1); break;
+            case Token::DIV: __ fdivp(1); break;
+            default: UNREACHABLE();
+          }
+          Label after_alloc_failure;
+          GenerateHeapResultAllocation(masm, &after_alloc_failure);
+          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+          __ ret(0);
+          __ bind(&after_alloc_failure);
+          __ fstp(0);  // Pop FPU stack before calling runtime.
+          __ jmp(&call_runtime);
         }
-        Label after_alloc_failure;
-        GenerateHeapResultAllocation(masm, &after_alloc_failure);
-        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-        __ ret(0);
-        __ bind(&after_alloc_failure);
-        __ ffree();
-        __ jmp(&call_runtime);
       }
 
       __ bind(&not_floats);
@@ -1753,10 +1763,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
       break;
     }
 
-    case Token::MOD: {
-      // For MOD we go directly to runtime in the non-smi case.
-      break;
-    }
     case Token::BIT_OR:
     case Token::BIT_AND:
     case Token::BIT_XOR:
@@ -1767,11 +1773,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
       Label not_floats;
       Label not_int32;
       Label non_smi_result;
-      /*  {
-        CpuFeatures::Scope use_sse2(SSE2);
-        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
-        FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
-        }*/
       FloatingPointHelper::LoadUnknownsAsIntegers(masm,
                                                   use_sse3_,
                                                   &not_floats);
@@ -1842,8 +1843,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
     default: UNREACHABLE(); break;
   }
 
-  // If an allocation fails, or SHR or MOD hit a hard case,
-  // use the runtime system to get the correct result.
+  // If an allocation fails, or SHR hits a hard case, use the runtime system to
+  // get the correct result.
   __ bind(&call_runtime);
 
   switch (op_) {
@@ -1864,8 +1865,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
       __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
       break;
     case Token::MOD:
-      GenerateRegisterArgsPush(masm);
-      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
       break;
     case Token::BIT_OR:
       __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
@@ -1966,7 +1965,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
         __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
         __ ret(0);
         __ bind(&after_alloc_failure);
-        __ ffree();
+        __ fstp(0);  // Pop FPU stack before calling runtime.
         __ jmp(&call_runtime);
       }
 
@@ -2170,8 +2169,8 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
         __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
         __ ret(0);
         __ bind(&after_alloc_failure);
-          __ ffree();
-          __ jmp(&call_runtime);
+        __ fstp(0);  // Pop FPU stack before calling runtime.
+        __ jmp(&call_runtime);
       }
         __ bind(&not_floats);
         break;
@@ -4261,39 +4260,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
   // NOTICE! This code is only reached after a smi-fast-case check, so
   // it is certain that at least one operand isn't a smi.
 
-  {
-    Label not_user_equal, user_equal;
-    __ test(eax, Immediate(kSmiTagMask));
-    __ j(zero, &not_user_equal);
-    __ test(edx, Immediate(kSmiTagMask));
-    __ j(zero, &not_user_equal);
-
-    __ CmpObjectType(eax, JS_OBJECT_TYPE, ebx);
-    __ j(not_equal, &not_user_equal);
-
-    __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
-    __ j(not_equal, &not_user_equal);
-
-    __ test_b(FieldOperand(ebx, Map::kBitField2Offset),
-              1 << Map::kUseUserObjectComparison);
-    __ j(not_zero, &user_equal);
-    __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
-              1 << Map::kUseUserObjectComparison);
-    __ j(not_zero, &user_equal);
-
-    __ jmp(&not_user_equal);
-
-    __ bind(&user_equal);
-   
-    __ pop(ebx); // Return address.
-    __ push(eax);
-    __ push(edx);
-    __ push(ebx);
-    __ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
-   
-    __ bind(&not_user_equal);
-  }
-
   // Identical objects can be compared fast, but there are some tricky cases
   // for NaN and undefined.
   {
@@ -5048,11 +5014,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   __ j(not_equal, &not_outermost_js, Label::kNear);
   __ mov(Operand::StaticVariable(js_entry_sp), ebp);
   __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
-  Label cont;
-  __ jmp(&cont, Label::kNear);
+  __ jmp(&invoke, Label::kNear);
   __ bind(&not_outermost_js);
   __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
-  __ bind(&cont);
 
   // Jump to a faked try block that does the invoke, with a faked catch
   // block that sets the pending exception.
@@ -6204,7 +6168,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
   __ sub(ecx, edx);
   __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
   Label not_original_string;
-  __ j(not_equal, &not_original_string, Label::kNear);
+  // Shorter than original string's length: an actual substring.
+  __ j(below, &not_original_string, Label::kNear);
+  // Longer than original string's length or negative: unsafe arguments.
+  __ j(above, &runtime);
+  // Return original string.
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->sub_string_native(), 1);
   __ ret(3 * kPointerSize);
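Annotation (not part of the patch): replacing the single not_equal branch with a below/above pair works because the length comparison is unsigned, so a negative requested length (from > to) wraps to a large unsigned value and safely takes the 'above' path into the runtime. A minimal sketch of that wrap-around, with illustrative numbers:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t original_length = 10;
      int32_t requested = -3;  // to - from, with from > to
      uint32_t as_unsigned = static_cast<uint32_t>(requested);
      assert(as_unsigned > original_length);  // 'above' => runtime
      uint32_t proper = 4;
      assert(proper < original_length);       // 'below' => real substring
      return 0;
    }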
@@ -6783,14 +6751,8 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
 
   __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
   __ j(not_equal, &miss, Label::kNear);
-  __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
-            1 << Map::kUseUserObjectComparison);
-  __ j(not_zero, &miss, Label::kNear);
   __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
   __ j(not_equal, &miss, Label::kNear);
-  __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
-            1 << Map::kUseUserObjectComparison);
-  __ j(not_zero, &miss, Label::kNear);
 
   ASSERT(GetCondition() == equal);
   __ sub(eax, edx);
@@ -6811,14 +6773,8 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
   __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
   __ cmp(ecx, known_map_);
   __ j(not_equal, &miss, Label::kNear);
-  __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
-            1 << Map::kUseUserObjectComparison);
-  __ j(not_zero, &miss, Label::kNear);
   __ cmp(ebx, known_map_);
   __ j(not_equal, &miss, Label::kNear);
-  __ test_b(FieldOperand(ebx, Map::kBitField2Offset),
-            1 << Map::kUseUserObjectComparison);
-  __ j(not_zero, &miss, Label::kNear);
 
   __ sub(eax, edx);
   __ ret(0);
index ea61910..cff6454 100644
@@ -397,9 +397,25 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
   // Allocate new FixedDoubleArray.
   // edx: receiver
   // edi: length of source FixedArray (smi-tagged)
-  __ lea(esi, Operand(edi, times_4, FixedDoubleArray::kHeaderSize));
+  __ lea(esi, Operand(edi,
+                      times_4,
+                      FixedDoubleArray::kHeaderSize + kPointerSize));
   __ AllocateInNewSpace(esi, eax, ebx, no_reg, &gc_required, TAG_OBJECT);
 
+  Label aligned, aligned_done;
+  __ test(eax, Immediate(kDoubleAlignmentMask - kHeapObjectTag));
+  __ j(zero, &aligned, Label::kNear);
+  __ mov(FieldOperand(eax, 0),
+         Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
+  __ add(eax, Immediate(kPointerSize));
+  __ jmp(&aligned_done);
+
+  __ bind(&aligned);
+  __ mov(Operand(eax, esi, times_1, -kPointerSize-1),
+         Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
+
+  __ bind(&aligned_done);
+
   // eax: destination FixedDoubleArray
   // edi: number of elements
   // edx: receiver
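Annotation (not part of the patch): the stub now over-allocates by one word so the double payload can be 8-byte aligned either way; new space only guarantees 4-byte alignment on ia32, so a one-word filler goes either before the array (unaligned case, object shifted up) or into the spare word after it (aligned case). A minimal sketch of the two cases, assuming the array start must end up 8-aligned:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kPointerSize = 4, kDoubleAlignment = 8;
      for (uint32_t start : {0x1000u, 0x1004u}) {  // the two possible cases
        uint32_t array = start;
        if (start % kDoubleAlignment != 0) array += kPointerSize;  // pre-filler
        assert(array % kDoubleAlignment == 0);
      }
      return 0;
    }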
index d13fa75..d153e18 100644
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -91,9 +91,11 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
   rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
 }
 
+// All debug break stubs support padding for LiveEdit.
+const bool Debug::FramePaddingLayout::kIsSupported = true;
 
-#define __ ACCESS_MASM(masm)
 
+#define __ ACCESS_MASM(masm)
 
 static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
                                           RegList object_regs,
@@ -103,6 +105,13 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
+    // Push the padding words onto the stack.
+    for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
+      __ push(Immediate(Smi::FromInt(
+          Debug::FramePaddingLayout::kPaddingValue)));
+    }
+    __ push(Immediate(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize)));
+
     // Store the registers containing live values on the expression stack to
     // make sure that these are correctly updated during GC. Non object values
     // are stored as a smi causing it to be untouched by GC.
@@ -134,6 +143,10 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
     CEntryStub ceb(1);
     __ CallStub(&ceb);
 
+    // Automatically find a register that can be used after the register
+    // restore. We need one register for the padding skip instructions.
+    Register unused_reg = { -1 };
+
     // Restore the register values containing object pointers from the
     // expression stack.
     for (int i = kNumJSCallerSaved; --i >= 0;) {
@@ -142,15 +155,29 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
       if (FLAG_debug_code) {
         __ Set(reg, Immediate(kDebugZapValue));
       }
+      bool taken = reg.code() == esi.code();
       if ((object_regs & (1 << r)) != 0) {
         __ pop(reg);
+        taken = true;
       }
       if ((non_object_regs & (1 << r)) != 0) {
         __ pop(reg);
         __ SmiUntag(reg);
+        taken = true;
+      }
+      if (!taken) {
+        unused_reg = reg;
       }
     }
 
+    ASSERT(unused_reg.code() != -1);
+
+    // Read the current padding counter and skip that many words.
+    __ pop(unused_reg);
+    // Divide the stored smi by 2 (untagging) and scale by the word size.
+    STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
+    __ lea(esp, Operand(esp, unused_reg, times_half_pointer_size, 0));
+
     // Get rid of the internal frame.
   }
 
@@ -172,10 +199,10 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
 void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
   // Register state for IC load call (from ic-ia32.cc).
   // ----------- S t a t e -------------
-  //  -- eax    : receiver
   //  -- ecx    : name
+  //  -- edx    : receiver
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), 0, false);
+  Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
 }
 
 
@@ -194,10 +221,10 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
 void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
   // Register state for keyed IC load call (from ic-ia32.cc).
   // ----------- S t a t e -------------
+  //  -- ecx    : key
   //  -- edx    : receiver
-  //  -- eax    : key
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, eax.bit() | edx.bit(), 0, false);
+  Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
 }
 
 
index 3f10c09..73961e1 100644 (file)
@@ -548,6 +548,8 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
 
 void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
                                               int frame_index) {
+  Builtins* builtins = isolate_->builtins();
+  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
   JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
   unsigned height = iterator->Next();
   unsigned height_in_bytes = height * kPointerSize;
@@ -555,7 +557,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
     PrintF("  translating construct stub => height=%d\n", height_in_bytes);
   }
 
-  unsigned fixed_frame_size = 6 * kPointerSize;
+  unsigned fixed_frame_size = 7 * kPointerSize;
   unsigned output_frame_size = height_in_bytes + fixed_frame_size;
 
   // Allocate and store the output frame description.
@@ -620,6 +622,15 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
            top_address + output_offset, output_offset, value);
   }
 
+  // The output frame reflects a JSConstructStubGeneric frame.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(construct_stub);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; code object\n",
+           top_address + output_offset, output_offset, value);
+  }
+
   // Number of incoming arguments.
   output_offset -= kPointerSize;
   value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
@@ -641,8 +652,6 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
 
   ASSERT(0 == output_offset);
 
-  Builtins* builtins = isolate_->builtins();
-  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
   uint32_t pc = reinterpret_cast<uint32_t>(
       construct_stub->instruction_start() +
       isolate_->heap()->construct_stub_deopt_pc_offset()->value());
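Annotation (not part of the patch): the fixed frame grows from 6 to 7 words because the construct stub's code object is now materialized into the output frame (the new slot logged as "code object" above). A trivial accounting sketch with an illustrative height:

    #include <cassert>

    int main() {
      const unsigned kPointerSize = 4;
      unsigned height = 3;                           // illustrative value
      unsigned height_in_bytes = height * kPointerSize;
      unsigned fixed_frame_size = 7 * kPointerSize;  // was 6 before the patch
      unsigned output_frame_size = height_in_bytes + fixed_frame_size;
      assert(output_frame_size == (3 + 7) * kPointerSize);
      return 0;
    }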
index 203aa36..266afce 100644
@@ -175,13 +175,12 @@ void FullCodeGenerator::Generate() {
 
   // Possibly allocate a local context.
   int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-  if (heap_slots > 0 ||
-      (scope()->is_qml_mode() && scope()->is_global_scope())) {
+  if (heap_slots > 0) {
     Comment cmnt(masm_, "[ Allocate local context");
     // Argument to NewContext is the function, which is still in edi.
     __ push(edi);
     if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-      FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+      FastNewContextStub stub(heap_slots);
       __ CallStub(&stub);
     } else {
       __ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -263,11 +262,11 @@ void FullCodeGenerator::Generate() {
       // For named function expressions, declare the function name as a
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
-        VariableProxy* proxy = scope()->function();
-        ASSERT(proxy->var()->mode() == CONST ||
-               proxy->var()->mode() == CONST_HARMONY);
-        ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
-        EmitDeclaration(proxy, proxy->var()->mode(), NULL);
+        VariableDeclaration* function = scope()->function();
+        ASSERT(function->proxy()->var()->mode() == CONST ||
+               function->proxy()->var()->mode() == CONST_HARMONY);
+        ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+        VisitVariableDeclaration(function);
       }
       VisitDeclarations(scope()->declarations());
     }
@@ -757,60 +756,51 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
 }
 
 
-void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
-                                        VariableMode mode,
-                                        FunctionLiteral* function) {
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+  // The variable in the declaration always resides in the current function
+  // context.
+  ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+  if (FLAG_debug_code) {
+    // Check that we're not inside a with or catch context.
+    __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
+    __ cmp(ebx, isolate()->factory()->with_context_map());
+    __ Check(not_equal, "Declaration in with context.");
+    __ cmp(ebx, isolate()->factory()->catch_context_map());
+    __ Check(not_equal, "Declaration in catch context.");
+  }
+}
+
+
+void FullCodeGenerator::VisitVariableDeclaration(
+    VariableDeclaration* declaration) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
+  VariableProxy* proxy = declaration->proxy();
+  VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
-  bool binding_needs_init = (function == NULL) &&
-      (mode == CONST || mode == CONST_HARMONY || mode == LET);
+  bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
   switch (variable->location()) {
     case Variable::UNALLOCATED:
-      ++global_count_;
+      globals_->Add(variable->name());
+      globals_->Add(variable->binding_needs_init()
+                        ? isolate()->factory()->the_hole_value()
+                        : isolate()->factory()->undefined_value());
       break;
 
     case Variable::PARAMETER:
     case Variable::LOCAL:
-      if (function != NULL) {
-        Comment cmnt(masm_, "[ Declaration");
-        VisitForAccumulatorValue(function);
-        __ mov(StackOperand(variable), result_register());
-      } else if (binding_needs_init) {
-        Comment cmnt(masm_, "[ Declaration");
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
         __ mov(StackOperand(variable),
                Immediate(isolate()->factory()->the_hole_value()));
       }
       break;
 
     case Variable::CONTEXT:
-      // The variable in the decl always resides in the current function
-      // context.
-      ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
-      if (FLAG_debug_code) {
-        // Check that we're not inside a with or catch context.
-        __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
-        __ cmp(ebx, isolate()->factory()->with_context_map());
-        __ Check(not_equal, "Declaration in with context.");
-        __ cmp(ebx, isolate()->factory()->catch_context_map());
-        __ Check(not_equal, "Declaration in catch context.");
-      }
-      if (function != NULL) {
-        Comment cmnt(masm_, "[ Declaration");
-        VisitForAccumulatorValue(function);
-        __ mov(ContextOperand(esi, variable->index()), result_register());
-        // We know that we have written a function, which is not a smi.
-        __ RecordWriteContextSlot(esi,
-                                  Context::SlotOffset(variable->index()),
-                                  result_register(),
-                                  ecx,
-                                  kDontSaveFPRegs,
-                                  EMIT_REMEMBERED_SET,
-                                  OMIT_SMI_CHECK);
-        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      } else if (binding_needs_init) {
-        Comment cmnt(masm_, "[ Declaration");
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
+        EmitDebugCheckDeclarationContext(variable);
         __ mov(ContextOperand(esi, variable->index()),
                Immediate(isolate()->factory()->the_hole_value()));
         // No write barrier since the hole value is in old space.
@@ -819,14 +809,12 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
       break;
 
     case Variable::LOOKUP: {
-      Comment cmnt(masm_, "[ Declaration");
+      Comment cmnt(masm_, "[ VariableDeclaration");
       __ push(esi);
       __ push(Immediate(variable->name()));
-      // Declaration nodes are always introduced in one of four modes.
-      ASSERT(mode == VAR ||
-             mode == CONST ||
-             mode == CONST_HARMONY ||
-             mode == LET);
+      // VariableDeclaration nodes are always introduced in one of four modes.
+      ASSERT(mode == VAR || mode == LET ||
+             mode == CONST || mode == CONST_HARMONY);
       PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
           ? READ_ONLY : NONE;
       __ push(Immediate(Smi::FromInt(attr)));
@@ -834,9 +822,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
       // Note: For variables we must not push an initial value (such as
       // 'undefined') because we may have a (legal) redeclaration and we
       // must not destroy the current value.
-      if (function != NULL) {
-        VisitForStackValue(function);
-      } else if (binding_needs_init) {
+      if (hole_init) {
         __ push(Immediate(isolate()->factory()->the_hole_value()));
       } else {
         __ push(Immediate(Smi::FromInt(0)));  // Indicates no initial value.
@@ -848,6 +834,118 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
 }
 
 
+void FullCodeGenerator::VisitFunctionDeclaration(
+    FunctionDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      globals_->Add(variable->name());
+      Handle<SharedFunctionInfo> function =
+          Compiler::BuildFunctionInfo(declaration->fun(), script());
+      // Check for stack-overflow exception.
+      if (function.is_null()) return SetStackOverflow();
+      globals_->Add(function);
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      VisitForAccumulatorValue(declaration->fun());
+      __ mov(StackOperand(variable), result_register());
+      break;
+    }
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      VisitForAccumulatorValue(declaration->fun());
+      __ mov(ContextOperand(esi, variable->index()), result_register());
+      // We know that we have written a function, which is not a smi.
+      __ RecordWriteContextSlot(esi,
+                                Context::SlotOffset(variable->index()),
+                                result_register(),
+                                ecx,
+                                kDontSaveFPRegs,
+                                EMIT_REMEMBERED_SET,
+                                OMIT_SMI_CHECK);
+      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      break;
+    }
+
+    case Variable::LOOKUP: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      __ push(esi);
+      __ push(Immediate(variable->name()));
+      __ push(Immediate(Smi::FromInt(NONE)));
+      VisitForStackValue(declaration->fun());
+      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+      break;
+    }
+  }
+}
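Annotation (not part of the patch): for UNALLOCATED variables both visitors above stop emitting code and instead append (name, initial value) pairs to globals_ — the hole or undefined for plain variables, a freshly compiled SharedFunctionInfo for functions — which DeclareGlobals() later flushes to the runtime in one call. A minimal sketch of that pairing, with stand-in types:

    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> globals;  // stand-in for the globals_ list
      auto declare = [&](const std::string& name, const std::string& init) {
        globals.push_back(name);  // globals_->Add(variable->name())
        globals.push_back(init);  // globals_->Add(initial value / function)
      };
      declare("x", "the_hole");                // hole-initialized binding
      declare("y", "undefined");               // plain 'var'
      declare("f", "<SharedFunctionInfo f>");  // function declaration
      assert(globals.size() % 2 == 0);         // consumed strictly in pairs
      return 0;
    }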
+
+
+void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  Handle<JSModule> instance = declaration->module()->interface()->Instance();
+  ASSERT(!instance.is_null());
+
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      Comment cmnt(masm_, "[ ModuleDeclaration");
+      globals_->Add(variable->name());
+      globals_->Add(instance);
+      Visit(declaration->module());
+      break;
+    }
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ ModuleDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      __ mov(ContextOperand(esi, variable->index()), Immediate(instance));
+      Visit(declaration->module());
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::LOOKUP:
+      UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED:
+      // TODO(rossberg)
+      break;
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ ImportDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      // TODO(rossberg)
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::LOOKUP:
+      UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
+  // TODO(rossberg)
+}
+
+
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   __ push(esi);  // The context is the first argument.
@@ -1188,7 +1286,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
 
   // All extension objects were empty and it is safe to use a global
   // load IC call.
-  __ mov(eax, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+  __ mov(edx, GlobalObjectOperand());
   __ mov(ecx, var->name());
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
@@ -1272,7 +1370,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
       Comment cmnt(masm_, "Global variable");
       // Use inline caching. Variable name is passed in ecx and the global
       // object in eax.
-      __ mov(eax, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+      __ mov(edx, GlobalObjectOperand());
       __ mov(ecx, var->name());
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
       CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
@@ -1666,9 +1764,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
       break;
     case NAMED_PROPERTY:
       if (expr->is_compound()) {
-        // We need the receiver both on the stack and in the accumulator.
-        VisitForAccumulatorValue(property->obj());
-        __ push(result_register());
+        // We need the receiver both on the stack and in edx.
+        VisitForStackValue(property->obj());
+        __ mov(edx, Operand(esp, 0));
       } else {
         VisitForStackValue(property->obj());
       }
@@ -1676,9 +1774,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
     case KEYED_PROPERTY: {
       if (expr->is_compound()) {
         VisitForStackValue(property->obj());
-        VisitForAccumulatorValue(property->key());
-        __ mov(edx, Operand(esp, 0));
-        __ push(eax);
+        VisitForStackValue(property->key());
+        __ mov(edx, Operand(esp, kPointerSize));  // Object.
+        __ mov(ecx, Operand(esp, 0));             // Key.
       } else {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
@@ -1921,7 +2019,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
       __ mov(ecx, eax);
-      __ pop(edx);
+      __ pop(edx);  // Receiver.
       __ pop(eax);  // Restore value.
       Handle<Code> ic = is_classic_mode()
           ? isolate()->builtins()->KeyedStoreIC_Initialize()
@@ -1939,7 +2037,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
   if (var->IsUnallocated()) {
     // Global var, const, or let.
     __ mov(ecx, var->name());
-    __ mov(edx, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+    __ mov(edx, GlobalObjectOperand());
     Handle<Code> ic = is_classic_mode()
         ? isolate()->builtins()->StoreIC_Initialize()
         : isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2027,6 +2125,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
 
 void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a named store IC.
+  // eax    : value
+  // esp[0] : receiver
+
   Property* prop = expr->target()->AsProperty();
   ASSERT(prop != NULL);
   ASSERT(prop->key()->AsLiteral() != NULL);
@@ -2069,6 +2170,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
 
 void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a keyed store IC.
+  // eax               : value
+  // esp[0]            : key
+  // esp[kPointerSize] : receiver
 
   // If the assignment starts a block of assignments to the same object,
   // change to slow case to avoid the quadratic behavior of repeatedly
@@ -2081,7 +2185,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
     __ pop(result_register());
   }
 
-  __ pop(ecx);
+  __ pop(ecx);  // Key.
   if (expr->ends_initialization_block()) {
     __ mov(edx, Operand(esp, 0));  // Leave receiver on the stack for later.
   } else {
@@ -2114,12 +2218,14 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
 
   if (key->IsPropertyName()) {
     VisitForAccumulatorValue(expr->obj());
+    __ mov(edx, result_register());
     EmitNamedPropertyLoad(expr);
     context()->Plug(eax);
   } else {
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
-    __ pop(edx);
+    __ pop(edx);                     // Object.
+    __ mov(ecx, result_register());  // Key.
     EmitKeyedPropertyLoad(expr);
     context()->Plug(eax);
   }
@@ -2242,11 +2348,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
   // Push the start position of the scope the call resides in.
   __ push(Immediate(Smi::FromInt(scope()->start_position())));
 
-  // Push the qml mode flag
-  __ push(Immediate(Smi::FromInt(is_qml_mode())));
-
   // Do the runtime call.
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
 }
 
 
@@ -2299,7 +2402,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
 
   } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
     // Push global object as receiver for the call IC.
-    __ push(proxy->var()->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+    __ push(GlobalObjectOperand());
     EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -3302,99 +3405,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
 }
 
 
-void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 3);
-  VisitForStackValue(args->at(0));
-  VisitForStackValue(args->at(1));
-  VisitForStackValue(args->at(2));
-  Label done;
-  Label slow_case;
-  Register object = eax;
-  Register index_1 = ebx;
-  Register index_2 = ecx;
-  Register elements = edi;
-  Register temp = edx;
-  __ mov(object, Operand(esp, 2 * kPointerSize));
-  // Fetch the map and check if array is in fast case.
-  // Check that object doesn't require security checks and
-  // has no indexed interceptor.
-  __ CmpObjectType(object, JS_ARRAY_TYPE, temp);
-  __ j(not_equal, &slow_case);
-  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
-            KeyedLoadIC::kSlowCaseBitFieldMask);
-  __ j(not_zero, &slow_case);
-
-  // Check the object's elements are in fast case and writable.
-  __ mov(elements, FieldOperand(object, JSObject::kElementsOffset));
-  __ cmp(FieldOperand(elements, HeapObject::kMapOffset),
-         Immediate(isolate()->factory()->fixed_array_map()));
-  __ j(not_equal, &slow_case);
-
-  // Check that both indices are smis.
-  __ mov(index_1, Operand(esp, 1 * kPointerSize));
-  __ mov(index_2, Operand(esp, 0));
-  __ mov(temp, index_1);
-  __ or_(temp, index_2);
-  __ JumpIfNotSmi(temp, &slow_case);
-
-  // Check that both indices are valid.
-  __ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
-  __ cmp(temp, index_1);
-  __ j(below_equal, &slow_case);
-  __ cmp(temp, index_2);
-  __ j(below_equal, &slow_case);
-
-  // Bring addresses into index1 and index2.
-  __ lea(index_1, CodeGenerator::FixedArrayElementOperand(elements, index_1));
-  __ lea(index_2, CodeGenerator::FixedArrayElementOperand(elements, index_2));
-
-  // Swap elements.  Use object and temp as scratch registers.
-  __ mov(object, Operand(index_1, 0));
-  __ mov(temp,   Operand(index_2, 0));
-  __ mov(Operand(index_2, 0), object);
-  __ mov(Operand(index_1, 0), temp);
-
-  Label no_remembered_set;
-  __ CheckPageFlag(elements,
-                   temp,
-                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                   not_zero,
-                   &no_remembered_set,
-                   Label::kNear);
-  // Possible optimization: do a check that both values are Smis
-  // (or them and test against Smi mask.)
-
-  // We are swapping two objects in an array and the incremental marker never
-  // pauses in the middle of scanning a single object.  Therefore the
-  // incremental marker is not disturbed, so we don't need to call the
-  // RecordWrite stub that notifies the incremental marker.
-  __ RememberedSetHelper(elements,
-                         index_1,
-                         temp,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
-  __ RememberedSetHelper(elements,
-                         index_2,
-                         temp,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
-
-  __ bind(&no_remembered_set);
-
-  // We are done. Drop elements from the stack, and return undefined.
-  __ add(esp, Immediate(3 * kPointerSize));
-  __ mov(eax, isolate()->factory()->undefined_value());
-  __ jmp(&done);
-
-  __ bind(&slow_case);
-  __ CallRuntime(Runtime::kSwapElements, 3);
-
-  __ bind(&done);
-  context()->Plug(eax);
-}
-
-
 void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
@@ -3849,7 +3859,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
         // but "delete this" is allowed.
         ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
         if (var->IsUnallocated()) {
-          __ push(var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+          __ push(GlobalObjectOperand());
           __ push(Immediate(var->name()));
           __ push(Immediate(Smi::FromInt(kNonStrictMode)));
           __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
@@ -4014,15 +4024,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
       __ push(Immediate(Smi::FromInt(0)));
     }
     if (assign_type == NAMED_PROPERTY) {
-      // Put the object both on the stack and in the accumulator.
+      // Put the object both on the stack and in edx.
       VisitForAccumulatorValue(prop->obj());
       __ push(eax);
+      __ mov(edx, eax);
       EmitNamedPropertyLoad(prop);
     } else {
       VisitForStackValue(prop->obj());
-      VisitForAccumulatorValue(prop->key());
-      __ mov(edx, Operand(esp, 0));
-      __ push(eax);
+      VisitForStackValue(prop->key());
+      __ mov(edx, Operand(esp, kPointerSize));  // Object.
+      __ mov(ecx, Operand(esp, 0));             // Key.
       EmitKeyedPropertyLoad(prop);
     }
   }
@@ -4169,7 +4180,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
 
   if (proxy != NULL && proxy->var()->IsUnallocated()) {
     Comment cmnt(masm_, "Global variable");
-    __ mov(eax, proxy->var()->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+    __ mov(edx, GlobalObjectOperand());
     __ mov(ecx, Immediate(proxy->name()));
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
@@ -4434,7 +4445,8 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
 
 void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
   Scope* declaration_scope = scope()->DeclarationScope();
-  if (declaration_scope->is_global_scope()) {
+  if (declaration_scope->is_global_scope() ||
+      declaration_scope->is_module_scope()) {
     // Contexts nested in the global context have a canonical empty function
     // as their closure, not the anonymous closure containing the global
     // code.  Pass a smi sentinel and let the runtime look up the empty
index 33f247a..dc64a09 100644
@@ -218,13 +218,13 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
 
 void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- eax    : receiver
   //  -- ecx    : name
+  //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
 
-  StubCompiler::GenerateLoadArrayLength(masm, eax, edx, &miss);
+  StubCompiler::GenerateLoadArrayLength(masm, edx, eax, &miss);
   __ bind(&miss);
   StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
 }
@@ -233,13 +233,13 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
 void LoadIC::GenerateStringLength(MacroAssembler* masm,
                                   bool support_wrappers) {
   // ----------- S t a t e -------------
-  //  -- eax    : receiver
   //  -- ecx    : name
+  //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
 
-  StubCompiler::GenerateLoadStringLength(masm, eax, edx, ebx, &miss,
+  StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss,
                                          support_wrappers);
   __ bind(&miss);
   StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -248,13 +248,13 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm,
 
 void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- eax    : receiver
   //  -- ecx    : name
+  //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
 
-  StubCompiler::GenerateLoadFunctionPrototype(masm, eax, edx, ebx, &miss);
+  StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss);
   __ bind(&miss);
   StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
 }
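Annotation (not part of the patch): these hunks all implement the same ia32 convention change — LoadIC and KeyedLoadIC now take the receiver in edx (previously eax), with the name or key in ecx, and still return in eax. A reference sketch of the contract, assembled from the state comments above:

    #include <cassert>
    #include <map>
    #include <string>

    int main() {
      std::map<std::string, std::string> load_ic = {
          {"receiver", "edx"}, {"name", "ecx"}, {"result", "eax"}};
      std::map<std::string, std::string> keyed_load_ic = {
          {"receiver", "edx"}, {"key", "ecx"}, {"result", "eax"}};
      assert(load_ic["receiver"] == keyed_load_ic["receiver"]);  // both in edx
      return 0;
    }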
@@ -383,7 +383,7 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
   __ j(below, slow_case);
 
   // Check that the key is a positive smi.
-  __ test(key, Immediate(0x8000001));
+  __ test(key, Immediate(0x80000001));
   __ j(not_zero, slow_case);
 
   // Load the elements into scratch1 and check its map.
@@ -396,7 +396,7 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
   __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
   __ sub(scratch2, Immediate(Smi::FromInt(2)));
   __ cmp(key, scratch2);
-  __ j(greater_equal, unmapped_case);
+  __ j(above_equal, unmapped_case);
 
   // Load element index and check whether it is the hole.
   const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
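Annotation (not part of the patch): two correctness fixes sit in this lookup. The mask gains a zero — 0x80000001 tests both the smi tag bit (bit 0) and the sign bit (bit 31), so only non-negative smis pass, whereas the old 0x8000001 left bit 31 unchecked — and the bound check switches to the unsigned above_equal. A minimal sketch of the mask, assuming ia32 smi tagging (value << 1):

    #include <cassert>
    #include <cstdint>

    int main() {
      auto is_positive_smi = [](uint32_t tagged) {
        return (tagged & 0x80000001u) == 0;  // smi tag bit and sign bit clear
      };
      assert(is_positive_smi(5u << 1));         // smi 5 passes
      assert(!is_positive_smi(0xFFFFFFFEu));    // smi -1 fails (sign bit)
      assert(!is_positive_smi((5u << 1) | 1));  // heap-object tag fails
      return 0;
    }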
@@ -443,7 +443,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
 
 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
@@ -451,39 +451,34 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   Label probe_dictionary, check_number_dictionary;
 
   // Check that the key is a smi.
-  __ JumpIfNotSmi(eax, &check_string);
+  __ JumpIfNotSmi(ecx, &check_string);
   __ bind(&index_smi);
   // Now the key is known to be a smi. This place is also jumped to from
   // where a numeric string is converted to a smi.
 
   GenerateKeyedLoadReceiverCheck(
-      masm, edx, ecx, Map::kHasIndexedInterceptor, &slow);
+      masm, edx, eax, Map::kHasIndexedInterceptor, &slow);
 
   // Check the receiver's map to see if it has fast elements.
-  __ CheckFastElements(ecx, &check_number_dictionary);
-
-  GenerateFastArrayLoad(masm,
-                        edx,
-                        eax,
-                        ecx,
-                        eax,
-                        NULL,
-                        &slow);
+  __ CheckFastElements(eax, &check_number_dictionary);
+
+  GenerateFastArrayLoad(masm, edx, ecx, eax, eax, NULL, &slow);
   Isolate* isolate = masm->isolate();
   Counters* counters = isolate->counters();
   __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
   __ ret(0);
+
   __ bind(&check_number_dictionary);
-  __ mov(ebx, eax);
+  __ mov(ebx, ecx);
   __ SmiUntag(ebx);
-  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+  __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
 
   // Check whether the elements object is a number dictionary.
   // edx: receiver
   // ebx: untagged index
-  // eax: key
-  // ecx: elements
-  __ CheckMap(ecx,
+  // ecx: key
+  // eax: elements
+  __ CheckMap(eax,
               isolate->factory()->hash_table_map(),
               &slow,
               DONT_DO_SMI_CHECK);
@@ -491,13 +486,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // Push receiver on the stack to free up a register for the dictionary
   // probing.
   __ push(edx);
-  __ LoadFromNumberDictionary(&slow_pop_receiver,
-                              ecx,
-                              eax,
-                              ebx,
-                              edx,
-                              edi,
-                              eax);
+  __ LoadFromNumberDictionary(&slow_pop_receiver, eax, ecx, ebx, edx, edi, eax);
   // Pop receiver before returning.
   __ pop(edx);
   __ ret(0);
@@ -509,15 +498,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ bind(&slow);
   // Slow case: jump to runtime.
   // edx: receiver
-  // eax: key
+  // ecx: key
   __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
   GenerateRuntimeGetProperty(masm);
 
   __ bind(&check_string);
-  GenerateKeyStringCheck(masm, eax, ecx, ebx, &index_string, &slow);
+  GenerateKeyStringCheck(masm, ecx, eax, ebx, &index_string, &slow);
 
   GenerateKeyedLoadReceiverCheck(
-      masm, edx, ecx, Map::kHasNamedInterceptor, &slow);
+      masm, edx, eax, Map::kHasNamedInterceptor, &slow);
 
   // If the receiver is a fast-case object, check the keyed lookup
   // cache. Otherwise probe the dictionary.
@@ -526,15 +515,18 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
          Immediate(isolate->factory()->hash_table_map()));
   __ j(equal, &probe_dictionary);
 
-  // Load the map of the receiver, compute the keyed lookup cache hash
+  // The receiver's map is still in eax; compute the keyed lookup cache hash
   // based on 32 bits of the map pointer and the string hash.
-  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ mov(ecx, ebx);
-  __ shr(ecx, KeyedLookupCache::kMapHashShift);
-  __ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
+  if (FLAG_debug_code) {
+    __ cmp(eax, FieldOperand(edx, HeapObject::kMapOffset));
+    __ Check(equal, "Map is no longer in eax.");
+  }
+  __ mov(ebx, eax);  // Keep the map around for later.
+  __ shr(eax, KeyedLookupCache::kMapHashShift);
+  __ mov(edi, FieldOperand(ecx, String::kHashFieldOffset));
   __ shr(edi, String::kHashShift);
-  __ xor_(ecx, edi);
-  __ and_(ecx, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
+  __ xor_(eax, edi);
+  __ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
 
   // Load the key (consisting of map and symbol) from the cache and
   // check for match.
@@ -546,7 +538,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
 
   for (int i = 0; i < kEntriesPerBucket - 1; i++) {
     Label try_next_entry;
-    __ mov(edi, ecx);
+    __ mov(edi, eax);
     __ shl(edi, kPointerSizeLog2 + 1);
     if (i != 0) {
       __ add(edi, Immediate(kPointerSize * i * 2));
@@ -554,25 +546,25 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
     __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
     __ j(not_equal, &try_next_entry);
     __ add(edi, Immediate(kPointerSize));
-    __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
+    __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
     __ j(equal, &hit_on_nth_entry[i]);
     __ bind(&try_next_entry);
   }
 
-  __ lea(edi, Operand(ecx, 1));
+  __ lea(edi, Operand(eax, 1));
   __ shl(edi, kPointerSizeLog2 + 1);
   __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
   __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
   __ j(not_equal, &slow);
   __ add(edi, Immediate(kPointerSize));
-  __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
+  __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
   __ j(not_equal, &slow);
 
   // Get field offset.
   // edx     : receiver
   // ebx     : receiver's map
-  // eax     : key
-  // ecx     : lookup cache index
+  // ecx     : key
+  // eax     : lookup cache index
   ExternalReference cache_field_offsets =
       ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
 
@@ -580,12 +572,12 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
     __ bind(&hit_on_nth_entry[i]);
     if (i != 0) {
-      __ add(ecx, Immediate(i));
+      __ add(eax, Immediate(i));
     }
     __ mov(edi,
-           Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
-    __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
-    __ sub(edi, ecx);
+           Operand::StaticArray(eax, times_pointer_size, cache_field_offsets));
+    __ movzx_b(eax, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
+    __ sub(edi, eax);
     __ j(above_equal, &property_array_property);
     if (i != 0) {
       __ jmp(&load_in_object_property);
@@ -594,9 +586,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
 
   // Load in-object property.
   __ bind(&load_in_object_property);
-  __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
-  __ add(ecx, edi);
-  __ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
+  __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceSizeOffset));
+  __ add(eax, edi);
+  __ mov(eax, FieldOperand(edx, eax, times_pointer_size, 0));
   __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
   __ ret(0);
 
@@ -612,16 +604,16 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // exists.
   __ bind(&probe_dictionary);
 
-  __ mov(ecx, FieldOperand(edx, JSObject::kMapOffset));
-  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  GenerateGlobalInstanceTypeCheck(masm, ecx, &slow);
+  __ mov(eax, FieldOperand(edx, JSObject::kMapOffset));
+  __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
+  GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
 
-  GenerateDictionaryLoad(masm, &slow, ebx, eax, ecx, edi, eax);
+  GenerateDictionaryLoad(masm, &slow, ebx, ecx, eax, edi, eax);
   __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
   __ ret(0);
 
   __ bind(&index_string);
-  __ IndexFromHash(ebx, eax);
+  __ IndexFromHash(ebx, ecx);
   // Now jump to the place where smi keys are handled.
   __ jmp(&index_smi);
 }
@@ -629,15 +621,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
 
 void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- eax    : key (index)
+  //  -- ecx    : key (index)
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
 
   Register receiver = edx;
-  Register index = eax;
-  Register scratch = ecx;
+  Register index = ecx;
+  Register scratch = ebx;
   Register result = eax;
 
   StringCharAtGenerator char_at_generator(receiver,
@@ -661,7 +653,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
 
 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
@@ -671,24 +663,24 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
   __ JumpIfSmi(edx, &slow);
 
   // Check that the key is an array index, that is Uint32.
-  __ test(eax, Immediate(kSmiTagMask | kSmiSignMask));
+  __ test(ecx, Immediate(kSmiTagMask | kSmiSignMask));
   __ j(not_zero, &slow);
 
   // Get the map of the receiver.
-  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
 
   // Check that it has indexed interceptor and access checks
   // are not enabled for this object.
-  __ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
-  __ and_(ecx, Immediate(kSlowCaseBitFieldMask));
-  __ cmp(ecx, Immediate(1 << Map::kHasIndexedInterceptor));
+  __ movzx_b(eax, FieldOperand(eax, Map::kBitFieldOffset));
+  __ and_(eax, Immediate(kSlowCaseBitFieldMask));
+  __ cmp(eax, Immediate(1 << Map::kHasIndexedInterceptor));
   __ j(not_zero, &slow);
 
   // Everything is fine, call runtime.
-  __ pop(ecx);
+  __ pop(eax);
   __ push(edx);  // receiver
-  __ push(eax);  // key
-  __ push(ecx);  // return address
+  __ push(ecx);  // key
+  __ push(eax);  // return address
 
   // Perform tail call to the entry.
   ExternalReference ref =
@@ -703,20 +695,20 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
 
 void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
   Label slow, notin;
   Factory* factory = masm->isolate()->factory();
   Operand mapped_location =
-      GenerateMappedArgumentsLookup(masm, edx, eax, ebx, ecx, &notin, &slow);
+      GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, &notin, &slow);
   __ mov(eax, mapped_location);
   __ Ret();
   __ bind(&notin);
   // The unmapped lookup expects that the parameter map is in ebx.
   Operand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, eax, ebx, ecx, &slow);
+      GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow);
   __ cmp(unmapped_location, factory->the_hole_value());
   __ j(equal, &slow);
   __ mov(eax, unmapped_location);
@@ -1308,15 +1300,15 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
 
 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- eax    : receiver
   //  -- ecx    : name
+  //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
 
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
-  Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, eax, ecx, ebx,
-                                                  edx);
+  Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
+                                                  eax);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
@@ -1325,17 +1317,17 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
 
 void LoadIC::GenerateNormal(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- eax    : receiver
   //  -- ecx    : name
+  //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
 
-  GenerateStringDictionaryReceiverCheck(masm, eax, edx, ebx, &miss);
+  GenerateStringDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
 
-  // edx: elements
+  // eax: elements
   // Search the dictionary placing the result in eax.
-  GenerateDictionaryLoad(masm, &miss, edx, ecx, edi, ebx, eax);
+  GenerateDictionaryLoad(masm, &miss, eax, ecx, edi, ebx, eax);
   __ ret(0);
 
   // Cache miss: Jump to runtime.
@@ -1346,15 +1338,15 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
 
 void LoadIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- eax    : receiver
   //  -- ecx    : name
+  //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
 
   __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
 
   __ pop(ebx);
-  __ push(eax);  // receiver
+  __ push(edx);  // receiver
   __ push(ecx);  // name
   __ push(ebx);  // return address
 
@@ -1367,7 +1359,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
 
 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
@@ -1376,7 +1368,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
 
   __ pop(ebx);
   __ push(edx);  // receiver
-  __ push(eax);  // name
+  __ push(ecx);  // name
   __ push(ebx);  // return address
 
   // Perform tail call to the entry.
@@ -1390,14 +1382,14 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
 
 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
 
   __ pop(ebx);
   __ push(edx);  // receiver
-  __ push(eax);  // name
+  __ push(ecx);  // name
   __ push(ebx);  // return address
 
   // Perform tail call to the entry.
@@ -1735,12 +1727,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
 
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address());
+    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
   }
 }
 
 
-void PatchInlinedSmiCode(Address address) {
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1761,14 +1753,18 @@ void PatchInlinedSmiCode(Address address) {
            address, test_instruction_address, delta);
   }
 
-  // Patch with a short conditional jump. There must be a
-  // short jump-if-carry/not-carry at this position.
+  // Patch with a short conditional jump. Enabling means switching from a short
+  // jump-if-carry/not-carry to a jump-if-zero/not-zero, and disabling is the
+  // reverse operation.
   Address jmp_address = test_instruction_address - delta;
-  ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
-         *jmp_address == Assembler::kJcShortOpcode);
-  Condition cc = *jmp_address == Assembler::kJncShortOpcode
-      ? not_zero
-      : zero;
+  ASSERT((check == ENABLE_INLINED_SMI_CHECK)
+         ? (*jmp_address == Assembler::kJncShortOpcode ||
+            *jmp_address == Assembler::kJcShortOpcode)
+         : (*jmp_address == Assembler::kJnzShortOpcode ||
+            *jmp_address == Assembler::kJzShortOpcode));
+  Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
+      ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
+      : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
   *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
 }
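
The byte rewritten here is a short Jcc opcode, encoded on ia32 as 0x70 | condition;
the Assembler::kJ*ShortOpcode constants asserted above are built the same way. A
minimal standalone sketch of the enable direction (a model of the encoding, not
code from this patch):

    // Mirrors the ia32 short-Jcc encoding (0x70 | cc); the enum values follow
    // the standard ia32 condition codes.
    #include <cassert>
    #include <cstdint>

    enum Condition { carry = 2, not_carry = 3, zero = 4, not_zero = 5 };
    const uint8_t kJccShortPrefix = 0x70;

    // Enabling the inlined smi check turns jc/jnc into jz/jnz, preserving the
    // branch polarity; disabling is the inverse mapping.
    uint8_t EnableInlinedSmiCheck(uint8_t opcode) {
      assert(opcode == (kJccShortPrefix | carry) ||
             opcode == (kJccShortPrefix | not_carry));
      Condition cc = (opcode == (kJccShortPrefix | not_carry)) ? not_zero : zero;
      return static_cast<uint8_t>(kJccShortPrefix | cc);
    }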
 
index 2b68539..fa58146 100644 (file)
@@ -186,13 +186,12 @@ bool LCodeGen::GeneratePrologue() {
 
   // Possibly allocate a local context.
   int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-  if (heap_slots > 0 ||
-      (scope()->is_qml_mode() && scope()->is_global_scope())) {
+  if (heap_slots > 0) {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is still in edi.
     __ push(edi);
     if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-      FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+      FastNewContextStub stub(heap_slots);
       __ CallStub(&stub);
     } else {
       __ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -2060,8 +2059,9 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                   RelocInfo::CODE_TARGET,
                   instr,
                   RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-  ASSERT(instr->HasDeoptimizationEnvironment());
-  LEnvironment* env = instr->deoptimization_environment();
+  // Get the deoptimization index of the LLazyBailout environment that
+  // corresponds to this instruction.
+  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
 
   // Put the result value into the eax slot and restore all registers.
@@ -2115,7 +2115,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
 
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->global_object()).is(eax));
+  ASSERT(ToRegister(instr->global_object()).is(edx));
   ASSERT(ToRegister(instr->result()).is(eax));
 
   __ mov(ecx, instr->name());
@@ -2274,46 +2274,41 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
   Register result = ToRegister(instr->result());
 
   int map_count = instr->hydrogen()->types()->length();
+  bool need_generic = instr->hydrogen()->need_generic();
+
+  if (map_count == 0 && !need_generic) {
+    DeoptimizeIf(no_condition, instr->environment());
+    return;
+  }
   Handle<String> name = instr->hydrogen()->name();
-  if (map_count == 0) {
-    ASSERT(instr->hydrogen()->need_generic());
-    __ mov(ecx, name);
-    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    Label done;
-    for (int i = 0; i < map_count - 1; ++i) {
-      Handle<Map> map = instr->hydrogen()->types()->at(i);
+  Label done;
+  for (int i = 0; i < map_count; ++i) {
+    bool last = (i == map_count - 1);
+    Handle<Map> map = instr->hydrogen()->types()->at(i);
+    __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+    if (last && !need_generic) {
+      DeoptimizeIf(not_equal, instr->environment());
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+    } else {
       Label next;
-      __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
       __ j(not_equal, &next, Label::kNear);
       EmitLoadFieldOrConstantFunction(result, object, map, name);
       __ jmp(&done, Label::kNear);
       __ bind(&next);
     }
-    Handle<Map> map = instr->hydrogen()->types()->last();
-    __ cmp(FieldOperand(object, HeapObject::kMapOffset), map);
-    if (instr->hydrogen()->need_generic()) {
-      Label generic;
-      __ j(not_equal, &generic, Label::kNear);
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-      __ jmp(&done, Label::kNear);
-      __ bind(&generic);
-      __ mov(ecx, name);
-      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      CallCode(ic, RelocInfo::CODE_TARGET, instr);
-    } else {
-      DeoptimizeIf(not_equal, instr->environment());
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-    }
-    __ bind(&done);
   }
+  if (need_generic) {
+    __ mov(ecx, name);
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  }
+  __ bind(&done);
 }
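
The rewrite above folds the old special-cased last-map branch into a single loop
over all maps. In scalar C++ terms the emitted control flow is roughly the sketch
below; LoadField, CallGenericLoadIC and Deoptimize are illustrative stand-ins for
EmitLoadFieldOrConstantFunction, the LoadIC call and DeoptimizeIf, not V8 API:

    // Control-flow sketch of DoLoadNamedFieldPolymorphic after the change.
    #include <vector>

    struct Map {};
    int LoadField(const Map&) { return 0; }   // EmitLoadFieldOrConstantFunction
    int CallGenericLoadIC() { return 0; }     // LoadIC_Initialize call
    void Deoptimize() {}                      // DeoptimizeIf(...)

    int PolymorphicLoad(const Map* object_map, const std::vector<Map*>& maps,
                        bool need_generic) {
      int map_count = static_cast<int>(maps.size());
      if (map_count == 0 && !need_generic) { Deoptimize(); return 0; }
      for (int i = 0; i < map_count; ++i) {
        bool last = (i == map_count - 1);
        if (object_map == maps[i]) return LoadField(*maps[i]);
        if (last && !need_generic) Deoptimize();  // no match, no fallback
      }
      return need_generic ? CallGenericLoadIC() : 0;
    }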
 
 
 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->object()).is(eax));
+  ASSERT(ToRegister(instr->object()).is(edx));
   ASSERT(ToRegister(instr->result()).is(eax));
 
   __ mov(ecx, instr->name());
@@ -2426,9 +2421,11 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
 
   // Load the result.
   __ mov(result,
-         BuildFastArrayOperand(instr->elements(), instr->key(),
+         BuildFastArrayOperand(instr->elements(),
+                               instr->key(),
                                FAST_ELEMENTS,
-                               FixedArray::kHeaderSize - kHeapObjectTag));
+                               FixedArray::kHeaderSize - kHeapObjectTag,
+                               instr->additional_index()));
 
   // Check for the hole value.
   if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2447,13 +2444,17 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
   Operand hole_check_operand = BuildFastArrayOperand(
       instr->elements(), instr->key(),
       FAST_DOUBLE_ELEMENTS,
-      offset);
+      offset,
+      instr->additional_index());
   __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
   DeoptimizeIf(equal, instr->environment());
 
   Operand double_load_operand = BuildFastArrayOperand(
-      instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
-      FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+      instr->elements(),
+      instr->key(),
+      FAST_DOUBLE_ELEMENTS,
+      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
+      instr->additional_index());
   __ movdbl(result, double_load_operand);
 }
 
@@ -2462,7 +2463,8 @@ Operand LCodeGen::BuildFastArrayOperand(
     LOperand* elements_pointer,
     LOperand* key,
     ElementsKind elements_kind,
-    uint32_t offset) {
+    uint32_t offset,
+    uint32_t additional_index) {
   Register elements_pointer_reg = ToRegister(elements_pointer);
   int shift_size = ElementsKindToShiftSize(elements_kind);
   if (key->IsConstantOperand()) {
@@ -2471,10 +2473,14 @@ Operand LCodeGen::BuildFastArrayOperand(
       Abort("array index constant value too big");
     }
     return Operand(elements_pointer_reg,
-                   constant_value * (1 << shift_size) + offset);
+                   ((constant_value + additional_index) << shift_size)
+                       + offset);
   } else {
     ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
-    return Operand(elements_pointer_reg, ToRegister(key), scale_factor, offset);
+    return Operand(elements_pointer_reg,
+                   ToRegister(key),
+                   scale_factor,
+                   offset + (additional_index << shift_size));
   }
 }
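
With additional_index threaded through, the displacement for a constant key
becomes ((key + additional_index) << shift) + offset, and for a register key the
extra index moves into the static displacement instead. A worked sketch of the
constant-key case, assuming ia32 FAST_ELEMENTS values (shift 2, FixedArray header
8 bytes, heap-object tag 1); the constants are assumed, not pulled from headers:

    #include <cassert>
    #include <cstdint>

    const uint32_t kShiftSize = 2;   // ElementsKindToShiftSize(FAST_ELEMENTS), ia32
    const uint32_t kOffset = 8 - 1;  // FixedArray::kHeaderSize - kHeapObjectTag

    uint32_t Displacement(uint32_t key, uint32_t additional_index) {
      return ((key + additional_index) << kShiftSize) + kOffset;
    }

    int main() {
      // Element 3 with additional_index 1 lands (3 + 1) * 4 + 7 = 23 bytes
      // past the tagged elements pointer.
      assert(Displacement(3, 1) == 23);
    }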
 
@@ -2483,7 +2489,10 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     LLoadKeyedSpecializedArrayElement* instr) {
   ElementsKind elements_kind = instr->elements_kind();
   Operand operand(BuildFastArrayOperand(instr->external_pointer(),
-                                        instr->key(), elements_kind, 0));
+                                        instr->key(),
+                                        elements_kind,
+                                        0,
+                                        instr->additional_index()));
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
     XMMRegister result(ToDoubleRegister(instr->result()));
     __ movss(result, operand);
@@ -2534,7 +2543,7 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
   ASSERT(ToRegister(instr->object()).is(edx));
-  ASSERT(ToRegister(instr->key()).is(eax));
+  ASSERT(ToRegister(instr->key()).is(ecx));
 
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -2544,25 +2553,29 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   Register result = ToRegister(instr->result());
 
-  // Check for arguments adapter frame.
-  Label done, adapted;
-  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
-  __ cmp(Operand(result),
-         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(equal, &adapted, Label::kNear);
-
-  // No arguments adaptor frame.
-  __ mov(result, Operand(ebp));
-  __ jmp(&done, Label::kNear);
+  if (instr->hydrogen()->from_inlined()) {
+    __ lea(result, Operand(esp, -2 * kPointerSize));
+  } else {
+    // Check for arguments adapter frame.
+    Label done, adapted;
+    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+    __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
+    __ cmp(Operand(result),
+           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+    __ j(equal, &adapted, Label::kNear);
+
+    // No arguments adaptor frame.
+    __ mov(result, Operand(ebp));
+    __ jmp(&done, Label::kNear);
 
-  // Arguments adaptor frame present.
-  __ bind(&adapted);
-  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+    // Arguments adaptor frame present.
+    __ bind(&adapted);
+    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
 
-  // Result is the frame pointer for the frame if not adapted and for the real
-  // frame below the adaptor frame if adapted.
-  __ bind(&done);
+    // The result is the frame pointer of this frame if not adapted, or of the
+    // real frame below the adaptor frame if adapted.
+    __ bind(&done);
+  }
 }
 
 
@@ -2667,7 +2680,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
 
   // Invoke the function.
   __ bind(&invoke);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
@@ -2684,6 +2697,11 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
 }
 
 
+void LCodeGen::DoDrop(LDrop* instr) {
+  __ Drop(instr->count());
+}
+
+
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
   __ LoadHeapObject(result, instr->hydrogen()->closure());
@@ -2716,7 +2734,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
-  __ mov(result, Operand(context, Context::SlotOffset(instr->qml_global()?Context::QML_GLOBAL_INDEX:Context::GLOBAL_INDEX)));
+  __ mov(result, Operand(context, Context::SlotOffset(Context::GLOBAL_INDEX)));
 }
 
 
@@ -2730,7 +2748,8 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int arity,
                                  LInstruction* instr,
-                                 CallKind call_kind) {
+                                 CallKind call_kind,
+                                 EDIState edi_state) {
   bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
       function->shared()->formal_parameter_count() == arity;
 
@@ -2738,7 +2757,9 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   RecordPosition(pointers->position());
 
   if (can_invoke_directly) {
-    __ LoadHeapObject(edi, function);
+    if (edi_state == EDI_UNINITIALIZED) {
+      __ LoadHeapObject(edi, function);
+    }
 
     // Change context if needed.
     bool change_context =
@@ -2781,7 +2802,8 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   CallKnownFunction(instr->function(),
                     instr->arity(),
                     instr,
-                    CALL_AS_METHOD);
+                    CALL_AS_METHOD,
+                    EDI_UNINITIALIZED);
 }
 
 
@@ -2912,11 +2934,13 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
     __ cmp(output_reg, 0x80000000u);
     DeoptimizeIf(equal, instr->environment());
   } else {
+    Label negative_sign;
     Label done;
-    // Deoptimize on negative numbers.
+    // Deoptimize on unordered.
     __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
     __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(below, instr->environment());
+    DeoptimizeIf(parity_even, instr->environment());
+    __ j(below, &negative_sign, Label::kNear);
 
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       // Check for negative zero.
@@ -2932,10 +2956,21 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
 
     // Use truncating instruction (OK because input is positive).
     __ cvttsd2si(output_reg, Operand(input_reg));
-
     // Overflow is signalled with minint.
     __ cmp(output_reg, 0x80000000u);
     DeoptimizeIf(equal, instr->environment());
+    __ jmp(&done, Label::kNear);
+
+    // A non-zero negative number reaches here.
+    __ bind(&negative_sign);
+    // Truncate, then compare and compensate.
+    __ cvttsd2si(output_reg, Operand(input_reg));
+    __ cvtsi2sd(xmm_scratch, output_reg);
+    __ ucomisd(input_reg, xmm_scratch);
+    __ j(equal, &done, Label::kNear);
+    __ sub(output_reg, Immediate(1));
+    DeoptimizeIf(overflow, instr->environment());
+
     __ bind(&done);
   }
 }
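
The new negative path implements floor by truncating toward zero and then
compensating when the truncation was inexact; NaN is now caught up front via the
parity flag. A scalar model of the in-range behaviour (the overflow and NaN cases
deoptimize in the generated code and are elided here):

    #include <cassert>

    int FloorViaTruncate(double input) {
      int truncated = static_cast<int>(input);            // cvttsd2si
      if (static_cast<double>(truncated) == input) return truncated;  // exact
      return input < 0 ? truncated - 1 : truncated;       // compensate negatives
    }

    int main() {
      assert(FloorViaTruncate(-2.5) == -3);
      assert(FloorViaTruncate(2.5) == 2);
    }
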
@@ -3227,13 +3262,21 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
   ASSERT(ToRegister(instr->function()).is(edi));
   ASSERT(instr->HasPointerMap());
-  ASSERT(instr->HasDeoptimizationEnvironment());
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
-  SafepointGenerator generator(
-      this, pointers, Safepoint::kLazyDeopt);
-  ParameterCount count(instr->arity());
-  __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+
+  if (instr->known_function().is_null()) {
+    LPointerMap* pointers = instr->pointer_map();
+    RecordPosition(pointers->position());
+    SafepointGenerator generator(
+        this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(instr->arity());
+    __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+  } else {
+    CallKnownFunction(instr->known_function(),
+                      instr->arity(),
+                      instr,
+                      CALL_AS_METHOD,
+                      EDI_CONTAINS_TARGET);
+  }
 }
 
 
@@ -3288,7 +3331,11 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(eax));
-  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
+  CallKnownFunction(instr->target(),
+                    instr->arity(),
+                    instr,
+                    CALL_AS_FUNCTION,
+                    EDI_UNINITIALIZED);
 }
 
 
@@ -3382,7 +3429,10 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
     LStoreKeyedSpecializedArrayElement* instr) {
   ElementsKind elements_kind = instr->elements_kind();
   Operand operand(BuildFastArrayOperand(instr->external_pointer(),
-                                        instr->key(), elements_kind, 0));
+                                        instr->key(),
+                                        elements_kind,
+                                        0,
+                                        instr->additional_index()));
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
     __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
     __ movss(operand, xmm0);
@@ -3423,31 +3473,21 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
   Register elements = ToRegister(instr->object());
   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
 
-  // Do the store.
-  if (instr->key()->IsConstantOperand()) {
-    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
-    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
-    int offset =
-        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
-    __ mov(FieldOperand(elements, offset), value);
-  } else {
-    __ mov(FieldOperand(elements,
-                        key,
-                        times_pointer_size,
-                        FixedArray::kHeaderSize),
-           value);
-  }
+  Operand operand = BuildFastArrayOperand(
+      instr->object(),
+      instr->key(),
+      FAST_ELEMENTS,
+      FixedArray::kHeaderSize - kHeapObjectTag,
+      instr->additional_index());
+  __ mov(operand, value);
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
+    ASSERT(!instr->key()->IsConstantOperand());
     HType type = instr->hydrogen()->value()->type();
     SmiCheck check_needed =
         type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
-    __ lea(key,
-           FieldOperand(elements,
-                        key,
-                        times_pointer_size,
-                        FixedArray::kHeaderSize));
+    __ lea(key, operand);
     __ RecordWrite(elements,
                    key,
                    value,
@@ -3461,19 +3501,25 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
 void LCodeGen::DoStoreKeyedFastDoubleElement(
     LStoreKeyedFastDoubleElement* instr) {
   XMMRegister value = ToDoubleRegister(instr->value());
-  Label have_value;
 
-  __ ucomisd(value, value);
-  __ j(parity_odd, &have_value);  // NaN.
+  if (instr->NeedsCanonicalization()) {
+    Label have_value;
 
-  ExternalReference canonical_nan_reference =
-      ExternalReference::address_of_canonical_non_hole_nan();
-  __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
-  __ bind(&have_value);
+    __ ucomisd(value, value);
+    __ j(parity_odd, &have_value);  // NaN.
+
+    ExternalReference canonical_nan_reference =
+        ExternalReference::address_of_canonical_non_hole_nan();
+    __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
+    __ bind(&have_value);
+  }
 
   Operand double_store_operand = BuildFastArrayOperand(
-      instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
-      FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+      instr->elements(),
+      instr->key(),
+      FAST_DOUBLE_ELEMENTS,
+      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
+      instr->additional_index());
   __ movdbl(double_store_operand, value);
 }
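
Canonicalization exists because fast double arrays reserve one particular NaN bit
pattern (the hole NaN) as a sentinel, so any other NaN must be rewritten to the
canonical one before being stored. The test is a self-comparison, since NaN is the
only value unequal to itself; a scalar sketch, with canonical_nan standing in for
the value behind ExternalReference::address_of_canonical_non_hole_nan():

    // Sketch of the NeedsCanonicalization() path above.
    double CanonicalizeForStore(double value, double canonical_nan) {
      if (value != value) return canonical_nan;  // ucomisd sets PF exactly for NaN
      return value;
    }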
 
@@ -4434,6 +4480,13 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
     __ LoadHeapObject(ecx, object);
     __ cmp(source, ecx);
     __ Assert(equal, "Unexpected object literal boilerplate");
+    __ mov(ecx, FieldOperand(source, HeapObject::kMapOffset));
+    __ cmp(ecx, Handle<Map>(object->map()));
+    __ Assert(equal, "Unexpected boilerplate map");
+    __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
+    __ and_(ecx, Map::kElementsKindMask);
+    __ cmp(ecx, object->GetElementsKind() << Map::kElementsKindShift);
+    __ Assert(equal, "Unexpected boilerplate elements kind");
   }
 
   // Only elements backing stores for non-COW arrays need to be copied.
@@ -4503,9 +4556,10 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
         __ mov(FieldOperand(result, total_offset + 4), Immediate(value_high));
       }
     } else if (elements->IsFixedArray()) {
+      Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
       for (int i = 0; i < elements_length; i++) {
         int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
-        Handle<Object> value = JSObject::GetElement(object, i);
+        Handle<Object> value(fast_elements->get(i));
         if (value->IsJSObject()) {
           Handle<JSObject> value_object = Handle<JSObject>::cast(value);
           __ lea(ecx, Operand(result, *offset));
@@ -4529,6 +4583,23 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
   int size = instr->hydrogen()->total_size();
+  ElementsKind boilerplate_elements_kind =
+      instr->hydrogen()->boilerplate()->GetElementsKind();
+
+  // Deopt if the literal boilerplate ElementsKind is of a type different than
+  // the expected one. The check isn't necessary if the boilerplate has already
+  // been converted to FAST_ELEMENTS.
+  if (boilerplate_elements_kind != FAST_ELEMENTS) {
+    __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
+    __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+    // Load the map's "bit field 2". We only need the first byte,
+    // but the following masking takes care of that anyway.
+    __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
+    // Retrieve elements_kind from bit field 2.
+    __ and_(ecx, Map::kElementsKindMask);
+    __ cmp(ecx, boilerplate_elements_kind << Map::kElementsKindShift);
+    DeoptimizeIf(not_equal, instr->environment());
+  }
 
   // Allocate all objects that are part of the literal in one big
   // allocation. This avoids multiple limit checks.
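
Both this deopt check and the new EmitDeepCopy asserts read the elements kind out
of the map's second bit field with a mask and a shift. The decode written out, with
the mask and shift treated as assumed values mirroring Map::kElementsKindMask and
Map::kElementsKindShift:

    #include <cstdint>

    // Illustrative decode of an elements kind from a map's bit field 2. The
    // emitted code avoids the down-shift by instead comparing against the
    // expected kind shifted up (expected << Map::kElementsKindShift).
    uint8_t ElementsKindOf(uint8_t bit_field2, uint8_t mask, uint8_t shift) {
      return static_cast<uint8_t>((bit_field2 & mask) >> shift);
    }
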
@@ -4813,7 +4884,7 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
   LOperand* key = instr->key();
   __ push(ToOperand(obj));
   EmitPushTaggedOperand(key);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   // Create safepoint generator that will also ensure enough space in the
@@ -4911,7 +4982,7 @@ void LCodeGen::DoIn(LIn* instr) {
   LOperand* key = instr->key();
   EmitPushTaggedOperand(key);
   EmitPushTaggedOperand(obj);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
index 52befc6..392bca2 100644 (file)
@@ -206,12 +206,18 @@ class LCodeGen BASE_EMBEDDED {
                                LInstruction* instr,
                                LOperand* context);
 
+  enum EDIState {
+    EDI_UNINITIALIZED,
+    EDI_CONTAINS_TARGET
+  };
+
   // Generate a direct call to a known function.  Expects the function
   // to be in edi.
   void CallKnownFunction(Handle<JSFunction> function,
                          int arity,
                          LInstruction* instr,
-                         CallKind call_kind);
+                         CallKind call_kind,
+                         EDIState edi_state);
 
   void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                     SafepointMode safepoint_mode);
@@ -236,7 +242,8 @@ class LCodeGen BASE_EMBEDDED {
   Operand BuildFastArrayOperand(LOperand* elements_pointer,
                                 LOperand* key,
                                 ElementsKind elements_kind,
-                                uint32_t offset);
+                                uint32_t offset,
+                                uint32_t additional_index = 0);
 
   // Specific math operations - used from DoUnaryMathOperation.
   void EmitIntegerMathAbs(LUnaryMathOperation* instr);
index fe48521..d0cb230 100644 (file)
@@ -729,22 +729,6 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
 }
 
 
-LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
-    LInstruction* instr, int ast_id) {
-  ASSERT(instruction_pending_deoptimization_environment_ == NULL);
-  ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
-  instruction_pending_deoptimization_environment_ = instr;
-  pending_deoptimization_ast_id_ = ast_id;
-  return instr;
-}
-
-
-void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
-  instruction_pending_deoptimization_environment_ = NULL;
-  pending_deoptimization_ast_id_ = AstNode::kNoNumber;
-}
-
-
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
@@ -757,8 +741,10 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
   if (hinstr->HasObservableSideEffects()) {
     ASSERT(hinstr->next()->IsSimulate());
     HSimulate* sim = HSimulate::cast(hinstr->next());
-    instr = SetInstructionPendingDeoptimizationEnvironment(
-        instr, sim->ast_id());
+    ASSERT(instruction_pending_deoptimization_environment_ == NULL);
+    ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+    instruction_pending_deoptimization_environment_ = instr;
+    pending_deoptimization_ast_id_ = sim->ast_id();
   }
 
   // If instruction does not have side-effects lazy deoptimization
@@ -776,12 +762,6 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
 }
 
 
-LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
-  instr->MarkAsSaveDoubles();
-  return instr;
-}
-
-
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
   ASSERT(!instr->HasPointerMap());
   instr->set_pointer_map(new(zone()) LPointerMap(position_));
@@ -1163,7 +1143,7 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
 
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LGlobalObject(context, instr->qml_global()));
+  return DefineAsRegister(new(zone()) LGlobalObject(context));
 }
 
 
@@ -1253,7 +1233,7 @@ LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
 LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   argument_count_ -= instr->argument_count();
-  LCallGlobal* result = new(zone()) LCallGlobal(context, instr->qml_global());
+  LCallGlobal* result = new(zone()) LCallGlobal(context);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1330,6 +1310,7 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
 LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
   ASSERT(instr->value()->representation().IsInteger32());
   ASSERT(instr->representation().IsInteger32());
+  if (instr->HasNoUses()) return NULL;
   LOperand* input = UseRegisterAtStart(instr->value());
   LBitNotI* result = new(zone()) LBitNotI(input);
   return DefineSameAsFirst(result);
@@ -1354,6 +1335,12 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
 }
 
 
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
 LInstruction* LChunkBuilder::DoMod(HMod* instr) {
   if (instr->representation().IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
@@ -1862,7 +1849,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* global_object = UseFixed(instr->global_object(), eax);
+  LOperand* global_object = UseFixed(instr->global_object(), edx);
   LLoadGlobalGeneric* result =
       new(zone()) LLoadGlobalGeneric(context, global_object);
   return MarkAsCall(DefineFixed(result, eax), instr);
@@ -1922,7 +1909,7 @@ LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
   ASSERT(instr->representation().IsTagged());
   if (instr->need_generic()) {
     LOperand* context = UseFixed(instr->context(), esi);
-    LOperand* obj = UseFixed(instr->object(), eax);
+    LOperand* obj = UseFixed(instr->object(), edx);
     LLoadNamedFieldPolymorphic* result =
         new(zone()) LLoadNamedFieldPolymorphic(context, obj);
     return MarkAsCall(DefineFixed(result, eax), instr);
@@ -1938,7 +1925,7 @@ LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
 
 LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object = UseFixed(instr->object(), eax);
+  LOperand* object = UseFixed(instr->object(), edx);
   LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
@@ -2003,8 +1990,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
   LOperand* external_pointer = UseRegister(instr->external_pointer());
   LOperand* key = UseRegisterOrConstant(instr->key());
   LLoadKeyedSpecializedArrayElement* result =
-      new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer,
-                                            key);
+      new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
   LInstruction* load_instr = DefineAsRegister(result);
   // An unsigned int array load might overflow and cause a deopt, make sure it
   // has an environment.
@@ -2017,7 +2003,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
 LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* object = UseFixed(instr->object(), edx);
-  LOperand* key = UseFixed(instr->key(), eax);
+  LOperand* key = UseFixed(instr->key(), ecx);
 
   LLoadKeyedGeneric* result =
       new(zone()) LLoadKeyedGeneric(context, object, key);
@@ -2348,9 +2334,12 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
     ASSERT(pending_deoptimization_ast_id_ == instr->ast_id());
     LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
     LInstruction* result = AssignEnvironment(lazy_bailout);
+    // Store the lazy deopt environment with the instruction if needed. Right
+    // now it is only used for LInstanceOfKnownGlobal.
     instruction_pending_deoptimization_environment_->
-        set_deoptimization_environment(result->environment());
-    ClearInstructionPendingDeoptimizationEnvironment();
+        SetDeferredLazyDeoptimizationEnvironment(result->environment());
+    instruction_pending_deoptimization_environment_ = NULL;
+    pending_deoptimization_ast_id_ = AstNode::kNoNumber;
     return result;
   }
 
@@ -2380,8 +2369,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
                                                undefined,
                                                instr->call_kind(),
                                                instr->is_construct());
-  if (instr->arguments() != NULL) {
-    inner->Bind(instr->arguments(), graph()->GetArgumentsObject());
+  if (instr->arguments_var() != NULL) {
+    inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
   }
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
@@ -2390,10 +2379,20 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
 
 
 LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  LInstruction* pop = NULL;
+
+  HEnvironment* env = current_block_->last_environment();
+
+  if (instr->arguments_pushed()) {
+    int argument_count = env->arguments_environment()->parameter_count();
+    pop = new(zone()) LDrop(argument_count);
+    argument_count_ -= argument_count;
+  }
+
   HEnvironment* outer = current_block_->last_environment()->
       DiscardInlined(false);
   current_block_->UpdateEnvironment(outer);
-  return NULL;
+  return pop;
 }
 
 
index 0db0a4b..be64b2f 100644 (file)
@@ -174,7 +174,8 @@ class LCodeGen;
   V(CheckMapValue)                              \
   V(LoadFieldByIndex)                           \
   V(DateField)                                  \
-  V(WrapReceiver)
+  V(WrapReceiver)                               \
+  V(Drop)
 
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
@@ -198,8 +199,7 @@ class LInstruction: public ZoneObject {
   LInstruction()
       : environment_(NULL),
         hydrogen_value_(NULL),
-        is_call_(false),
-        is_save_doubles_(false) { }
+        is_call_(false) { }
   virtual ~LInstruction() { }
 
   virtual void CompileToNative(LCodeGen* generator) = 0;
@@ -242,22 +242,12 @@ class LInstruction: public ZoneObject {
   void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
   HValue* hydrogen_value() const { return hydrogen_value_; }
 
-  void set_deoptimization_environment(LEnvironment* env) {
-    deoptimization_environment_.set(env);
-  }
-  LEnvironment* deoptimization_environment() const {
-    return deoptimization_environment_.get();
-  }
-  bool HasDeoptimizationEnvironment() const {
-    return deoptimization_environment_.is_set();
-  }
+  virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
 
   void MarkAsCall() { is_call_ = true; }
-  void MarkAsSaveDoubles() { is_save_doubles_ = true; }
 
   // Interface to the register allocator and iterators.
   bool IsMarkedAsCall() const { return is_call_; }
-  bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
 
   virtual bool HasResult() const = 0;
   virtual LOperand* result() = 0;
@@ -278,9 +268,7 @@ class LInstruction: public ZoneObject {
   LEnvironment* environment_;
   SetOncePointer<LPointerMap> pointer_map_;
   HValue* hydrogen_value_;
-  SetOncePointer<LEnvironment> deoptimization_environment_;
   bool is_call_;
-  bool is_save_doubles_;
 };
 
 
@@ -525,9 +513,8 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
 
 class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
  public:
-  LArgumentsElements() { }
-
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
 };
 
 
@@ -844,6 +831,15 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
   DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
 
   Handle<JSFunction> function() const { return hydrogen()->function(); }
+  LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
+    return lazy_deopt_env_;
+  }
+  virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
+    lazy_deopt_env_ = env;
+  }
+
+ private:
+  LEnvironment* lazy_deopt_env_;
 };
 
 
@@ -1242,13 +1238,13 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
 
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
 class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
  public:
-  LLoadKeyedFastDoubleElement(LOperand* elements,
-                              LOperand* key) {
+  LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
     inputs_[0] = elements;
     inputs_[1] = key;
   }
@@ -1259,13 +1255,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
 
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
 class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
  public:
-  LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
-                                    LOperand* key) {
+  LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
     inputs_[0] = external_pointer;
     inputs_[1] = key;
   }
@@ -1279,6 +1275,7 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
   ElementsKind elements_kind() const {
     return hydrogen()->elements_kind();
   }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
@@ -1401,6 +1398,19 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
 };
 
 
+class LDrop: public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LDrop(int count) : count_(count) { }
+
+  int count() const { return count_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+  int count_;
+};
+
+
 class LThisFunction: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
@@ -1439,17 +1449,13 @@ class LDeclareGlobals: public LTemplateInstruction<0, 1, 0> {
 
 class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
  public:
-  explicit LGlobalObject(LOperand* context, bool qml_global) {
+  explicit LGlobalObject(LOperand* context) {
     inputs_[0] = context;
-    qml_global_ = qml_global;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
 
   LOperand* context() { return InputAt(0); }
-  bool qml_global() { return qml_global_; }
- private:
-  bool qml_global_;
 };
 
 
@@ -1493,6 +1499,7 @@ class LInvokeFunction: public LTemplateInstruction<1, 2, 0> {
   virtual void PrintDataTo(StringStream* stream);
 
   int arity() const { return hydrogen()->argument_count() - 1; }
+  Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
 };
 
 
@@ -1550,7 +1557,7 @@ class LCallFunction: public LTemplateInstruction<1, 2, 0> {
 
 class LCallGlobal: public LTemplateInstruction<1, 1, 0> {
  public:
-  explicit LCallGlobal(LOperand* context, bool qml_global) : qml_global_(qml_global) {
+  explicit LCallGlobal(LOperand* context) {
     inputs_[0] = context;
   }
 
@@ -1562,10 +1569,6 @@ class LCallGlobal: public LTemplateInstruction<1, 1, 0> {
   LOperand* context() { return inputs_[0]; }
   Handle<String> name() const { return hydrogen()->name(); }
   int arity() const { return hydrogen()->argument_count() - 1; }
-
-  bool qml_global() { return qml_global_; }
- private:
-  bool qml_global_;
 };
 
 
@@ -1773,6 +1776,7 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
   LOperand* object() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
@@ -1795,6 +1799,9 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
+
+  bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
 };
 
 
@@ -1818,6 +1825,7 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
   ElementsKind elements_kind() const {
     return hydrogen()->elements_kind();
   }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
@@ -2479,11 +2487,6 @@ class LChunkBuilder BASE_EMBEDDED {
       LInstruction* instr,
       HInstruction* hinstr,
       CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-  LInstruction* MarkAsSaveDoubles(LInstruction* instr);
-
-  LInstruction* SetInstructionPendingDeoptimizationEnvironment(
-      LInstruction* instr, int ast_id);
-  void ClearInstructionPendingDeoptimizationEnvironment();
 
   LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
                                   int* argument_index_accumulator);
index 60e38a6..c31b0c2 100644 (file)
@@ -2566,7 +2566,7 @@ bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
-      masm_(Isolate::Current(), address, size + Assembler::kGap) {
+      masm_(NULL, address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
index 1cc9142..66d1ce7 100644 (file)
@@ -946,9 +946,6 @@ inline Operand GlobalObjectOperand() {
   return ContextOperand(esi, Context::GLOBAL_INDEX);
 }
 
-static inline Operand QmlGlobalObjectOperand() {
-  return ContextOperand(esi, Context::QML_GLOBAL_INDEX);
-}
 
 // Generates an Operand for saving parameters after PrepareCallApiFunction.
 Operand ApiParameterOperand(int index);
index fd26779..e148e2f 100644 (file)
@@ -406,6 +406,7 @@ static void PushInterceptorArguments(MacroAssembler* masm,
   __ push(receiver);
   __ push(holder);
   __ push(FieldOperand(scratch, InterceptorInfo::kDataOffset));
+  __ push(Immediate(reinterpret_cast<int>(masm->isolate())));
 }
 
 
@@ -419,12 +420,12 @@ static void CompileCallLoadPropertyWithInterceptor(
   __ CallExternalReference(
       ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
                         masm->isolate()),
-      5);
+      6);
 }
 
 
 // Number of pointers to be reserved on stack for fast API call.
-static const int kFastApiCallArguments = 3;
+static const int kFastApiCallArguments = 4;
 
 
 // Reserves space for the extra arguments to API function in the
@@ -472,10 +473,11 @@ static void GenerateFastApiCall(MacroAssembler* masm,
   //  -- esp[8]              : api function
   //                           (first fast api call extra argument)
   //  -- esp[12]             : api call data
-  //  -- esp[16]             : last argument
+  //  -- esp[16]             : isolate
+  //  -- esp[20]             : last argument
   //  -- ...
-  //  -- esp[(argc + 3) * 4] : first argument
-  //  -- esp[(argc + 4) * 4] : receiver
+  //  -- esp[(argc + 4) * 4] : first argument
+  //  -- esp[(argc + 5) * 4] : receiver
   // -----------------------------------
   // Get the function and setup the context.
   Handle<JSFunction> function = optimization.constant_function();
@@ -493,9 +495,11 @@ static void GenerateFastApiCall(MacroAssembler* masm,
   } else {
     __ mov(Operand(esp, 3 * kPointerSize), Immediate(call_data));
   }
+  __ mov(Operand(esp, 4 * kPointerSize),
+         Immediate(reinterpret_cast<int>(masm->isolate())));
 
   // Prepare arguments.
-  __ lea(eax, Operand(esp, 3 * kPointerSize));
+  __ lea(eax, Operand(esp, 4 * kPointerSize));
 
   const int kApiArgc = 1;  // API function gets reference to the v8::Arguments.
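
Reserving a fourth word (the isolate) shifts every fast-API stack offset up by one
pointer, which is the source of the 3-to-4 and argc + 3 to argc + 4 edits above. A
sketch of the offset arithmetic implied by the state comment, with the ia32 pointer
size assumed:

    #include <cassert>

    // The four reserved words between the return address and the JS arguments:
    // object passed to the callback, api function, call data, isolate.
    const int kPointerSize = 4;            // ia32
    const int kFastApiCallArguments = 4;

    int FirstArgumentOffset(int argc) {    // matches esp[(argc + 4) * 4]
      return (argc + kFastApiCallArguments) * kPointerSize;
    }

    int main() { assert(FirstArgumentOffset(2) == 24); }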
 
@@ -679,7 +683,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
     __ CallExternalReference(
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
                           masm->isolate()),
-        5);
+        6);
 
     // Restore the name_ register.
     __ pop(name_);
@@ -746,8 +750,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
                                       Register scratch,
                                       Label* miss_label) {
   // Check that the map of the object hasn't changed.
+  CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
+                                             : REQUIRE_EXACT_MAP;
   __ CheckMap(receiver_reg, Handle<Map>(object->map()),
-              miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+              miss_label, DO_SMI_CHECK, mode);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -1032,6 +1038,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
   } else {
     __ push(Immediate(Handle<Object>(callback->data())));
   }
+  __ push(Immediate(reinterpret_cast<int>(isolate())));
 
   // Save a pointer to where we pushed the arguments pointer.
   // This will be passed as the const AccessorInfo& to the C++ callback.
@@ -1042,9 +1049,9 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
 
   __ push(scratch3);  // Restore return address.
 
-  // 3 elements array for v8::Arguments::values_, handler for name and pointer
+  // 4-element array for v8::Arguments::values_, handler for name and pointer
   // to the values (it is considered a smi by the GC).
-  const int kStackSpace = 5;
+  const int kStackSpace = 6;
   const int kApiArgc = 2;
 
   __ PrepareCallApiFunction(kApiArgc);
@@ -1122,13 +1129,20 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
                                           name, miss);
     ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
 
+    // Preserve the receiver register explicitly whenever it is different from
+    // the holder and is needed in case the interceptor returns without a
+    // result. The CALLBACKS case needs the receiver to be passed into C++ code;
+    // the FIELD case might cause a miss during the prototype check.
+    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
+    bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
+        (lookup->type() == CALLBACKS || must_perform_prototype_check);
+
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
     {
       FrameScope frame_scope(masm(), StackFrame::INTERNAL);
 
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-        // CALLBACKS case needs a receiver to be passed into C++ callback.
+      if (must_preserve_receiver_reg) {
         __ push(receiver);
       }
       __ push(holder_reg);
@@ -1151,10 +1165,17 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
       frame_scope.GenerateLeaveFrame();
       __ ret(0);
 
+      // Clobber registers when generating debug code to provoke errors.
       __ bind(&interceptor_failed);
+      if (FLAG_debug_code) {
+        __ mov(receiver, Immediate(BitCast<int32_t>(kZapValue)));
+        __ mov(holder_reg, Immediate(BitCast<int32_t>(kZapValue)));
+        __ mov(name_reg, Immediate(BitCast<int32_t>(kZapValue)));
+      }
+
       __ pop(name_reg);
       __ pop(holder_reg);
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      if (must_preserve_receiver_reg) {
         __ pop(receiver);
       }
 
@@ -1163,7 +1184,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
 
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into holder_reg.
-    if (*interceptor_holder != lookup->holder()) {
+    if (must_perform_prototype_check) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
                                    Handle<JSObject>(lookup->holder()),
@@ -1197,6 +1218,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
       __ push(holder_reg);
       __ mov(holder_reg, Immediate(callback));
       __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
+      __ push(Immediate(reinterpret_cast<int>(isolate())));
       __ push(holder_reg);
       __ push(name_reg);
       __ push(scratch2);  // restore return address
@@ -1204,7 +1226,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
       ExternalReference ref =
           ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
                             masm()->isolate());
-      __ TailCallExternalReference(ref, 5, 1);
+      __ TailCallExternalReference(ref, 6, 1);
     }
   } else {  // !compile_followup_inline
     // Call the runtime system to load the interceptor.
@@ -1220,7 +1242,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
     ExternalReference ref =
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
                           isolate());
-    __ TailCallExternalReference(ref, 5, 1);
+    __ TailCallExternalReference(ref, 6, 1);
   }
 }
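
The predicate introduced above captures exactly when the receiver register has to
survive the interceptor call: CALLBACKS hands the receiver to C++ code, and a
prototype check that can miss may still need it afterwards. A self-contained
restatement of must_preserve_receiver_reg (the enum is an illustrative subset, not
V8's real PropertyType):

    enum LookupType { FIELD, CALLBACKS };  // illustrative subset only

    bool MustPreserveReceiver(bool receiver_is_holder, LookupType type,
                              bool must_perform_prototype_check) {
      return !receiver_is_holder &&
             (type == CALLBACKS || must_perform_prototype_check);
    }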
 
@@ -2158,7 +2180,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
                   name, depth, &miss);
 
   // Move the return address on top of the stack.
-  __ mov(eax, Operand(esp, 3 * kPointerSize));
+  __ mov(eax, Operand(esp, 4 * kPointerSize));
   __ mov(Operand(esp, 0 * kPointerSize), eax);
 
   // esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains
@@ -2687,27 +2709,27 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
                                                       Handle<JSObject> object,
                                                       Handle<JSObject> last) {
   // ----------- S t a t e -------------
-  //  -- eax    : receiver
   //  -- ecx    : name
+  //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
 
   // Check that the receiver isn't a smi.
-  __ JumpIfSmi(eax, &miss);
+  __ JumpIfSmi(edx, &miss);
 
   ASSERT(last->IsGlobalObject() || last->HasFastProperties());
 
   // Check the maps of the full prototype chain. Also check that
   // global property cells up to (but not including) the last object
   // in the prototype chain are empty.
-  CheckPrototypes(object, eax, last, ebx, edx, edi, name, &miss);
+  CheckPrototypes(object, edx, last, ebx, eax, edi, name, &miss);
 
   // If the last object in the prototype chain is a global object,
   // check that the global property cell is empty.
   if (last->IsGlobalObject()) {
     GenerateCheckPropertyCell(
-        masm(), Handle<GlobalObject>::cast(last), name, edx, &miss);
+        masm(), Handle<GlobalObject>::cast(last), name, eax, &miss);
   }
 
   // Return undefined if maps of the full prototype chain are still the
@@ -2728,13 +2750,13 @@ Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
                                                 int index,
                                                 Handle<String> name) {
   // ----------- S t a t e -------------
-  //  -- eax    : receiver
   //  -- ecx    : name
+  //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
 
-  GenerateLoadField(object, holder, eax, ebx, edx, edi, index, name, &miss);
+  GenerateLoadField(object, holder, edx, ebx, eax, edi, index, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -2749,13 +2771,13 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
     Handle<JSObject> holder,
     Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
-  //  -- eax    : receiver
   //  -- ecx    : name
+  //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
 
-  GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, edi, callback,
+  GenerateLoadCallback(object, holder, edx, ecx, ebx, eax, edi, callback,
                        name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2770,13 +2792,13 @@ Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
                                                    Handle<JSFunction> value,
                                                    Handle<String> name) {
   // ----------- S t a t e -------------
-  //  -- eax    : receiver
   //  -- ecx    : name
+  //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
 
-  GenerateLoadConstant(object, holder, eax, ebx, edx, edi, value, name, &miss);
+  GenerateLoadConstant(object, holder, edx, ebx, eax, edi, value, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -2789,8 +2811,8 @@ Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
                                                       Handle<JSObject> holder,
                                                       Handle<String> name) {
   // ----------- S t a t e -------------
-  //  -- eax    : receiver
   //  -- ecx    : name
+  //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
@@ -2800,7 +2822,7 @@ Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
 
   // TODO(368): Compile in the whole chain: all the interceptors in
   // prototypes and ultimate answer.
-  GenerateLoadInterceptor(receiver, holder, &lookup, eax, ecx, edx, ebx, edi,
+  GenerateLoadInterceptor(receiver, holder, &lookup, edx, ecx, eax, ebx, edi,
                           name, &miss);
 
   __ bind(&miss);
@@ -2818,15 +2840,15 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
     Handle<String> name,
     bool is_dont_delete) {
   // ----------- S t a t e -------------
-  //  -- eax    : receiver
   //  -- ecx    : name
+  //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
 
   // Check that the maps haven't changed.
-  __ JumpIfSmi(eax, &miss);
-  CheckPrototypes(object, eax, holder, ebx, edx, edi, name, &miss);
+  __ JumpIfSmi(edx, &miss);
+  CheckPrototypes(object, edx, holder, ebx, eax, edi, name, &miss);
 
   // Get the value from the cell.
   if (Serializer::enabled()) {
@@ -2864,7 +2886,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
                                                      Handle<JSObject> holder,
                                                      int index) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
@@ -2874,10 +2896,10 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
   __ IncrementCounter(counters->keyed_load_field(), 1);
 
   // Check that the name has not changed.
-  __ cmp(eax, Immediate(name));
+  __ cmp(ecx, Immediate(name));
   __ j(not_equal, &miss);
 
-  GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss);
+  GenerateLoadField(receiver, holder, edx, ebx, eax, edi, index, name, &miss);
 
   __ bind(&miss);
   __ DecrementCounter(counters->keyed_load_field(), 1);
@@ -2894,7 +2916,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
     Handle<JSObject> holder,
     Handle<AccessorInfo> callback) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
@@ -2904,10 +2926,10 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
   __ IncrementCounter(counters->keyed_load_callback(), 1);
 
   // Check that the name has not changed.
-  __ cmp(eax, Immediate(name));
+  __ cmp(ecx, Immediate(name));
   __ j(not_equal, &miss);
 
-  GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, edi, callback,
+  GenerateLoadCallback(receiver, holder, edx, ecx, ebx, eax, edi, callback,
                        name, &miss);
 
   __ bind(&miss);
@@ -2925,7 +2947,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
     Handle<JSObject> holder,
     Handle<JSFunction> value) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
@@ -2935,11 +2957,11 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
   __ IncrementCounter(counters->keyed_load_constant_function(), 1);
 
   // Check that the name has not changed.
-  __ cmp(eax, Immediate(name));
+  __ cmp(ecx, Immediate(name));
   __ j(not_equal, &miss);
 
   GenerateLoadConstant(
-      receiver, holder, edx, ebx, ecx, edi, value, name, &miss);
+      receiver, holder, edx, ebx, eax, edi, value, name, &miss);
   __ bind(&miss);
   __ DecrementCounter(counters->keyed_load_constant_function(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -2954,7 +2976,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
     Handle<JSObject> holder,
     Handle<String> name) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
@@ -2964,12 +2986,12 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
   __ IncrementCounter(counters->keyed_load_interceptor(), 1);
 
   // Check that the name has not changed.
-  __ cmp(eax, Immediate(name));
+  __ cmp(ecx, Immediate(name));
   __ j(not_equal, &miss);
 
   LookupResult lookup(isolate());
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(receiver, holder, &lookup, edx, eax, ecx, ebx, edi,
+  GenerateLoadInterceptor(receiver, holder, &lookup, edx, ecx, eax, ebx, edi,
                           name, &miss);
   __ bind(&miss);
   __ DecrementCounter(counters->keyed_load_interceptor(), 1);
@@ -2983,7 +3005,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
 Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
     Handle<String> name) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
@@ -2993,10 +3015,10 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
   __ IncrementCounter(counters->keyed_load_array_length(), 1);
 
   // Check that the name has not changed.
-  __ cmp(eax, Immediate(name));
+  __ cmp(ecx, Immediate(name));
   __ j(not_equal, &miss);
 
-  GenerateLoadArrayLength(masm(), edx, ecx, &miss);
+  GenerateLoadArrayLength(masm(), edx, eax, &miss);
   __ bind(&miss);
   __ DecrementCounter(counters->keyed_load_array_length(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3009,7 +3031,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
 Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
     Handle<String> name) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
@@ -3019,10 +3041,10 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
   __ IncrementCounter(counters->keyed_load_string_length(), 1);
 
   // Check that the name has not changed.
-  __ cmp(eax, Immediate(name));
+  __ cmp(ecx, Immediate(name));
   __ j(not_equal, &miss);
 
-  GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
+  GenerateLoadStringLength(masm(), edx, eax, ebx, &miss, true);
   __ bind(&miss);
   __ DecrementCounter(counters->keyed_load_string_length(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3035,7 +3057,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
 Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
     Handle<String> name) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
@@ -3045,10 +3067,10 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
   __ IncrementCounter(counters->keyed_load_function_prototype(), 1);
 
   // Check that the name has not changed.
-  __ cmp(eax, Immediate(name));
+  __ cmp(ecx, Immediate(name));
   __ j(not_equal, &miss);
 
-  GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss);
+  GenerateLoadFunctionPrototype(masm(), edx, eax, ebx, &miss);
   __ bind(&miss);
   __ DecrementCounter(counters->keyed_load_function_prototype(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -3061,7 +3083,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
 Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
     Handle<Map> receiver_map) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
@@ -3082,7 +3104,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
     MapHandleList* receiver_maps,
     CodeHandleList* handler_ics) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
@@ -3246,7 +3268,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
 void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
     MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
@@ -3254,21 +3276,15 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
 
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
-  __ JumpIfNotSmi(eax, &miss_force_generic);
-  __ mov(ebx, eax);
+  __ JumpIfNotSmi(ecx, &miss_force_generic);
+  __ mov(ebx, ecx);
   __ SmiUntag(ebx);
-  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+  __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
 
   // Push receiver on the stack to free up a register for the dictionary
   // probing.
   __ push(edx);
-  __ LoadFromNumberDictionary(&slow,
-                              ecx,
-                              eax,
-                              ebx,
-                              edx,
-                              edi,
-                              eax);
+  __ LoadFromNumberDictionary(&slow, eax, ecx, ebx, edx, edi, eax);
   // Pop receiver before returning.
   __ pop(edx);
   __ ret(0);
@@ -3277,7 +3293,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
   __ pop(edx);
 
   // ----------- S t a t e -------------
-  //  -- eax    : value
   //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
@@ -3289,7 +3304,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
 
   __ bind(&miss_force_generic);
   // ----------- S t a t e -------------
-  //  -- eax    : value
   //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
@@ -3301,11 +3315,44 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
 }
 
 
+static void GenerateSmiKeyCheck(MacroAssembler* masm,
+                                Register key,
+                                Register scratch,
+                                XMMRegister xmm_scratch0,
+                                XMMRegister xmm_scratch1,
+                                Label* fail) {
+  // Check that the key is a smi or, if SSE2 is available, a heap number
+  // holding a smi-representable value; branch to fail otherwise.
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    Label key_ok;
+    __ JumpIfSmi(key, &key_ok);
+    __ cmp(FieldOperand(key, HeapObject::kMapOffset),
+           Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
+    __ j(not_equal, fail);
+    __ movdbl(xmm_scratch0, FieldOperand(key, HeapNumber::kValueOffset));
+    __ cvttsd2si(scratch, Operand(xmm_scratch0));
+    __ cvtsi2sd(xmm_scratch1, scratch);
+    __ ucomisd(xmm_scratch1, xmm_scratch0);
+    __ j(not_equal, fail);
+    __ j(parity_even, fail);  // NaN.
+    // Check if the key fits in the smi range.
+    __ cmp(scratch, 0xc0000000);
+    __ j(sign, fail);
+    __ SmiTag(scratch);
+    __ mov(key, scratch);
+    __ bind(&key_ok);
+  } else {
+    __ JumpIfNotSmi(key, fail);
+  }
+}
+
+
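
GenerateSmiKeyCheck is what lets every keyed stub below accept a heap-number key such as 5.0 where previously only smi keys took the fast path. A rough C++ rendering of the SSE2 branch (illustrative only; the real code works on tagged values and CPU flags, and orders the steps differently, letting cvttsd2si and ucomisd flag overflow and NaN):

    #include <cstdint>

    // Sketch of the semantics GenerateSmiKeyCheck implements for a
    // heap-number key.
    bool HeapNumberKeyToSmi(double key, int32_t* out) {
      // Reject NaN and anything outside the 31-bit smi range
      // [-2^30, 2^30 - 1]; this is the "cmp scratch, 0xc0000000 / j sign"
      // test (NaN fails both comparisons below).
      if (!(key >= -1073741824.0 && key <= 1073741823.0)) return false;
      int32_t truncated = static_cast<int32_t>(key);  // cvttsd2si
      if (static_cast<double>(truncated) != key)      // cvtsi2sd + ucomisd
        return false;                                 // fractional part present
      *out = truncated;  // caller SmiTags this back into the key register
      return true;
    }
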
 void KeyedLoadStubCompiler::GenerateLoadExternalArray(
     MacroAssembler* masm,
     ElementsKind elements_kind) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
@@ -3314,41 +3361,41 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(eax, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
 
   // Check that the index is in range.
   __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-  __ cmp(eax, FieldOperand(ebx, ExternalArray::kLengthOffset));
+  __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
   // Unsigned comparison catches both negative and too-large values.
   __ j(above_equal, &miss_force_generic);
   __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
   // ebx: base pointer of external storage
   switch (elements_kind) {
     case EXTERNAL_BYTE_ELEMENTS:
-      __ SmiUntag(eax);  // Untag the index.
-      __ movsx_b(eax, Operand(ebx, eax, times_1, 0));
+      __ SmiUntag(ecx);  // Untag the index.
+      __ movsx_b(eax, Operand(ebx, ecx, times_1, 0));
       break;
     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
     case EXTERNAL_PIXEL_ELEMENTS:
-      __ SmiUntag(eax);  // Untag the index.
-      __ movzx_b(eax, Operand(ebx, eax, times_1, 0));
+      __ SmiUntag(ecx);  // Untag the index.
+      __ movzx_b(eax, Operand(ebx, ecx, times_1, 0));
       break;
     case EXTERNAL_SHORT_ELEMENTS:
-      __ movsx_w(eax, Operand(ebx, eax, times_1, 0));
+      __ movsx_w(eax, Operand(ebx, ecx, times_1, 0));
       break;
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      __ movzx_w(eax, Operand(ebx, eax, times_1, 0));
+      __ movzx_w(eax, Operand(ebx, ecx, times_1, 0));
       break;
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
     case EXTERNAL_INT_ELEMENTS:
-      __ mov(ecx, Operand(ebx, eax, times_2, 0));
+      __ mov(eax, Operand(ebx, ecx, times_2, 0));
       break;
     case EXTERNAL_FLOAT_ELEMENTS:
-      __ fld_s(Operand(ebx, eax, times_2, 0));
+      __ fld_s(Operand(ebx, ecx, times_2, 0));
       break;
     case EXTERNAL_DOUBLE_ELEMENTS:
-      __ fld_d(Operand(ebx, eax, times_4, 0));
+      __ fld_d(Operand(ebx, ecx, times_4, 0));
       break;
     default:
       UNREACHABLE();
@@ -3356,7 +3403,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
   }
 
   // For integer array types:
-  // ecx: value
+  // eax: value
   // For floating-point array type:
   // FP(0): value
 
@@ -3367,18 +3414,17 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
     // it to a HeapNumber.
     Label box_int;
     if (elements_kind == EXTERNAL_INT_ELEMENTS) {
-      __ cmp(ecx, 0xC0000000);
+      __ cmp(eax, 0xc0000000);
       __ j(sign, &box_int);
     } else {
       ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
       // The test is different for unsigned int values. Since we need
       // the value to be in the range of a positive smi, we can't
       // handle either of the top two bits being set in the value.
-      __ test(ecx, Immediate(0xC0000000));
+      __ test(eax, Immediate(0xc0000000));
       __ j(not_zero, &box_int);
     }
 
-    __ mov(eax, ecx);
     __ SmiTag(eax);
     __ ret(0);
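
Two different smi-range tests appear in this function and they are easy to conflate; in C++ terms (sketch only):

    #include <cstdint>

    // cmp eax, 0xc0000000 / j sign: the subtraction v - 0xC0000000 goes
    // negative exactly when a signed value lies outside [-2^30, 2^30 - 1].
    bool SignedFitsSmi(int32_t v) {
      return v >= -(1 << 30) && v < (1 << 30);
    }

    // test eax, 0xc0000000 / j not_zero: an unsigned value can only become
    // a (positive) smi if its top two bits are clear, i.e. v < 2^30.
    bool UnsignedFitsSmi(uint32_t v) {
      return (v & 0xC0000000u) == 0;
    }
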
 
@@ -3387,33 +3433,31 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
     // Allocate a HeapNumber for the int and perform int-to-double
     // conversion.
     if (elements_kind == EXTERNAL_INT_ELEMENTS) {
-      __ push(ecx);
+      __ push(eax);
       __ fild_s(Operand(esp, 0));
-      __ pop(ecx);
+      __ pop(eax);
     } else {
       ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
       // Need to zero-extend the value.
       // There's no fild variant for unsigned values, so zero-extend
       // to a 64-bit int manually.
       __ push(Immediate(0));
-      __ push(ecx);
+      __ push(eax);
       __ fild_d(Operand(esp, 0));
-      __ pop(ecx);
-      __ pop(ecx);
+      __ pop(eax);
+      __ pop(eax);
     }
     // FP(0): value
-    __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
+    __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
     // Set the value.
-    __ mov(eax, ecx);
     __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
     __ ret(0);
   } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
              elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     // For the floating-point array type, we need to always allocate a
     // HeapNumber.
-    __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
+    __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
     // Set the value.
-    __ mov(eax, ecx);
     __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
     __ ret(0);
   } else {
@@ -3433,7 +3477,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
   __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
 
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
@@ -3442,7 +3486,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
@@ -3459,7 +3503,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     MacroAssembler* masm,
     ElementsKind elements_kind) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- eax    : value
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
@@ -3468,8 +3513,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(ecx, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
 
   // Check that the index is in range.
   __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
@@ -3564,12 +3609,39 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
       // (code-stubs-ia32.cc) is roughly what is needed here though the
       // conversion failure case does not need to be handled.
       if (CpuFeatures::IsSupported(SSE2)) {
-        if (elements_kind != EXTERNAL_INT_ELEMENTS &&
-            elements_kind != EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+        if ((elements_kind == EXTERNAL_INT_ELEMENTS ||
+             elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) &&
+            CpuFeatures::IsSupported(SSE3)) {
+          CpuFeatures::Scope scope(SSE3);
+          // fisttp stores values as signed integers. To represent the
+          // entire range of int and unsigned int arrays, store as a
+          // 64-bit int and discard the high 32 bits.
+          __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+          __ sub(esp, Immediate(2 * kPointerSize));
+          __ fisttp_d(Operand(esp, 0));
+
+          // If conversion failed (NaN, infinity, or a number outside
+          // signed int64 range), the result is 0x8000000000000000, and
+          // we must handle this case in the runtime.
+          Label ok;
+          __ cmp(Operand(esp, kPointerSize), Immediate(0x80000000u));
+          __ j(not_equal, &ok);
+          __ cmp(Operand(esp, 0), Immediate(0));
+          __ j(not_equal, &ok);
+          __ add(esp, Immediate(2 * kPointerSize));  // Restore the stack.
+          __ jmp(&slow);
+
+          __ bind(&ok);
+          __ pop(ebx);
+          __ add(esp, Immediate(kPointerSize));
+          __ mov(Operand(edi, ecx, times_2, 0), ebx);
+        } else {
           ASSERT(CpuFeatures::IsSupported(SSE2));
           CpuFeatures::Scope scope(SSE2);
           __ cvttsd2si(ebx, FieldOperand(eax, HeapNumber::kValueOffset));
-          // ecx: untagged integer value
+          __ cmp(ebx, 0x80000000u);
+          __ j(equal, &slow);
+          // ebx: untagged integer value
           switch (elements_kind) {
             case EXTERNAL_PIXEL_ELEMENTS:
               __ ClampUint8(ebx);
@@ -3583,41 +3655,14 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
             case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
               __ mov_w(Operand(edi, ecx, times_1, 0), ebx);
               break;
+            case EXTERNAL_INT_ELEMENTS:
+            case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+              __ mov(Operand(edi, ecx, times_2, 0), ebx);
+              break;
             default:
               UNREACHABLE();
               break;
           }
-        } else {
-          if (CpuFeatures::IsSupported(SSE3)) {
-            CpuFeatures::Scope scope(SSE3);
-            // fisttp stores values as signed integers. To represent the
-            // entire range of int and unsigned int arrays, store as a
-            // 64-bit int and discard the high 32 bits.
-            // If the value is NaN or +/-infinity, the result is 0x80000000,
-            // which is automatically zero when taken mod 2^n, n < 32.
-            __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
-            __ sub(esp, Immediate(2 * kPointerSize));
-            __ fisttp_d(Operand(esp, 0));
-            __ pop(ebx);
-            __ add(esp, Immediate(kPointerSize));
-          } else {
-            ASSERT(CpuFeatures::IsSupported(SSE2));
-            CpuFeatures::Scope scope(SSE2);
-            // We can easily implement the correct rounding behavior for the
-            // range [0, 2^31-1]. For the time being, to keep this code simple,
-            // make the slow runtime call for values outside this range.
-            // Note: we could do better for signed int arrays.
-            __ movd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
-            // We will need the key if we have to make the slow runtime call.
-            __ push(ebx);
-            __ LoadPowerOf2(xmm1, ebx, 31);
-            __ pop(ebx);
-            __ ucomisd(xmm1, xmm0);
-            __ j(above_equal, &slow);
-            __ cvttsd2si(ebx, Operand(xmm0));
-          }
-          // ebx: untagged integer value
-          __ mov(Operand(edi, ecx, times_2, 0), ebx);
         }
         __ ret(0);  // Return original value.
       }
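
The restructured SSE3 branch changes behaviour, not just shape: previously NaN, infinity, and out-of-range doubles were stored as fisttp's failure value taken mod 2^32 (i.e. 0); now that failure pattern is detected and the store falls back to the runtime, and the SSE2 cvttsd2si path gains the analogous "cmp ebx, 0x80000000u / j equal, &slow" guard. A sketch of the check applied to the 64-bit result fisttp leaves on the stack (hypothetical helper, not V8 code):

    #include <cstdint>

    // Returns false when fisttp signalled failure (NaN, infinity, or a
    // value outside signed int64 range all produce 0x8000000000000000);
    // otherwise stores the low 32 bits, which covers both the int and
    // unsigned int external-array ranges.
    bool StoreFisttpResult(int64_t converted, uint32_t* slot) {
      if (static_cast<uint64_t>(converted) == 0x8000000000000000ull) {
        return false;  // the asm jumps to &slow here
      }
      *slot = static_cast<uint32_t>(converted);  // discard the high 32 bits
      return true;
    }
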
@@ -3655,7 +3700,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
 
 void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
@@ -3664,19 +3709,19 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(eax, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
 
   // Get the elements array.
-  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
-  __ AssertFastElements(ecx);
+  __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
+  __ AssertFastElements(eax);
 
   // Check that the key is within bounds.
-  __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
+  __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
   __ j(above_equal, &miss_force_generic);
 
   // Load the result and make sure it's not the hole.
-  __ mov(ebx, Operand(ecx, eax, times_2,
+  __ mov(ebx, Operand(eax, ecx, times_2,
                       FixedArray::kHeaderSize - kHeapObjectTag));
   __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
   __ j(equal, &miss_force_generic);
@@ -3693,7 +3738,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
 void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
     MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- eax    : key
+  //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
@@ -3702,39 +3747,38 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(eax, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
 
   // Get the elements array.
-  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
-  __ AssertFastElements(ecx);
+  __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
+  __ AssertFastElements(eax);
 
   // Check that the key is within bounds.
-  __ cmp(eax, FieldOperand(ecx, FixedDoubleArray::kLengthOffset));
+  __ cmp(ecx, FieldOperand(eax, FixedDoubleArray::kLengthOffset));
   __ j(above_equal, &miss_force_generic);
 
   // Check for the hole
   uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ cmp(FieldOperand(ecx, eax, times_4, offset), Immediate(kHoleNanUpper32));
+  __ cmp(FieldOperand(eax, ecx, times_4, offset), Immediate(kHoleNanUpper32));
   __ j(equal, &miss_force_generic);
 
   // Always allocate a heap number for the result.
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope use_sse2(SSE2);
-    __ movdbl(xmm0, FieldOperand(ecx, eax, times_4,
+    __ movdbl(xmm0, FieldOperand(eax, ecx, times_4,
                                  FixedDoubleArray::kHeaderSize));
   } else {
-    __ fld_d(FieldOperand(ecx, eax, times_4, FixedDoubleArray::kHeaderSize));
+    __ fld_d(FieldOperand(eax, ecx, times_4, FixedDoubleArray::kHeaderSize));
   }
-  __ AllocateHeapNumber(ecx, ebx, edi, &slow_allocate_heapnumber);
+  __ AllocateHeapNumber(eax, ebx, edi, &slow_allocate_heapnumber);
   // Set the value.
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope use_sse2(SSE2);
-    __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
+    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
   } else {
-    __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
+    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
   }
-  __ mov(eax, ecx);
   __ ret(0);
 
   __ bind(&slow_allocate_heapnumber);
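
Context for the hole check above: fast double arrays encode missing entries as one specific NaN bit pattern, so comparing only the upper 32 bits of the element against kHoleNanUpper32 suffices, and the offset arithmetic (header size plus sizeof(kHoleNanLower32)) addresses that upper word on little-endian ia32. A sketch, assuming kHoleNanUpper32 is 0x7FFFFFFF as in v8globals.h:

    #include <cstdint>
    #include <cstring>

    bool IsHoleNan(double element) {
      uint64_t bits;
      std::memcpy(&bits, &element, sizeof(bits));  // safe type-pun
      return static_cast<uint32_t>(bits >> 32) == 0x7FFFFFFFu;
    }
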
@@ -3771,8 +3815,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(ecx, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
 
   if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
     __ JumpIfNotSmi(eax, &transition_elements_kind);
@@ -3926,8 +3970,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(ecx, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, ecx, ebx, xmm0, xmm1, &miss_force_generic);
 
   // Get the elements array.
   __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
@@ -3988,6 +4032,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
 
     int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
     __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
+
     // Restore the key, which is known to be the array length.
     __ mov(ecx, Immediate(0));
 
index 3e9c7a8..9772b94 100644
@@ -352,9 +352,9 @@ void IC::Clear(Address address) {
       return KeyedStoreIC::Clear(address, target);
     case Code::CALL_IC: return CallIC::Clear(address, target);
     case Code::KEYED_CALL_IC:  return KeyedCallIC::Clear(address, target);
+    case Code::COMPARE_IC: return CompareIC::Clear(address, target);
     case Code::UNARY_OP_IC:
     case Code::BINARY_OP_IC:
-    case Code::COMPARE_IC:
     case Code::TO_BOOLEAN_IC:
       // Clearing these is tricky and does not
       // make any performance difference.
@@ -365,9 +365,8 @@ void IC::Clear(Address address) {
 
 
 void CallICBase::Clear(Address address, Code* target) {
+  if (target->ic_state() == UNINITIALIZED) return;
   bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
-  State state = target->ic_state();
-  if (state == UNINITIALIZED) return;
   Code* code =
       Isolate::Current()->stub_cache()->FindCallInitialize(
           target->arguments_count(),
@@ -410,6 +409,17 @@ void KeyedStoreIC::Clear(Address address, Code* target) {
 }
 
 
+void CompareIC::Clear(Address address, Code* target) {
+  // Only clear ICCompareStubs; we currently cannot clear generic CompareStubs.
+  if (target->major_key() != CodeStub::CompareIC) return;
+  // Only clear CompareICs that can retain objects.
+  if (target->compare_state() != KNOWN_OBJECTS) return;
+  Token::Value op = CompareIC::ComputeOperation(target);
+  SetTargetAtAddress(address, GetRawUninitialized(op));
+  PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
+}
+
+
 static bool HasInterceptorGetter(JSObject* object) {
   return !object->GetNamedInterceptor()->getter()->IsUndefined();
 }
@@ -665,7 +675,7 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
         // applicable.
         if (!holder.is_identical_to(receiver)) return Handle<Code>::null();
         return isolate()->stub_cache()->ComputeCallNormal(
-            argc, kind_, extra_state, IsQmlGlobal(holder));
+            argc, kind_, extra_state);
       }
       break;
     }
@@ -1053,18 +1063,33 @@ Handle<Code> KeyedLoadIC::ComputePolymorphicStub(
 }
 
 
+static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
+  // This helper implements a few common fast cases for converting
+  // non-smi keys of keyed loads/stores to a smi or a string.
+  if (key->IsHeapNumber()) {
+    double value = Handle<HeapNumber>::cast(key)->value();
+    if (isnan(value)) {
+      key = isolate->factory()->nan_symbol();
+    } else {
+      int int_value = FastD2I(value);
+      if (value == int_value && Smi::IsValid(int_value)) {
+        key = Handle<Smi>(Smi::FromInt(int_value));
+      }
+    }
+  } else if (key->IsUndefined()) {
+    key = isolate->factory()->undefined_symbol();
+  }
+  return key;
+}
+
+
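
TryConvertKey centralizes the key normalization that KeyedLoadIC::Load and KeyedStoreIC::Store now share. The effect, in terms of JavaScript element accesses (illustrative):

    // a[2.0]       -> key becomes Smi 2: same fast path as a[2]
    // a[2.5]       -> key unchanged: stays a heap number, generic path
    // a[0/0]       -> key becomes the "NaN" symbol: named-property lookup
    // a[undefined] -> key becomes the "undefined" symbol
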
 MaybeObject* KeyedLoadIC::Load(State state,
                                Handle<Object> object,
                                Handle<Object> key,
                                bool force_generic_stub) {
-  // Check for values that can be converted into a symbol.
-  // TODO(1295): Remove this code.
-  if (key->IsHeapNumber() &&
-      isnan(Handle<HeapNumber>::cast(key)->value())) {
-    key = isolate()->factory()->nan_symbol();
-  } else if (key->IsUndefined()) {
-    key = isolate()->factory()->undefined_symbol();
-  }
+  // Check for values that can be converted into a symbol directly or are
+  // representable as a smi.
+  key = TryConvertKey(key, isolate());
 
   if (key->IsSymbol()) {
     Handle<String> name = Handle<String>::cast(key);
@@ -1761,6 +1786,10 @@ MaybeObject* KeyedStoreIC::Store(State state,
                                  Handle<Object> key,
                                  Handle<Object> value,
                                  bool force_generic) {
+  // Check for values that can be converted into a symbol directly or are
+  // representable as a smi.
+  key = TryConvertKey(key, isolate());
+
   if (key->IsSymbol()) {
     Handle<String> name = Handle<String>::cast(key);
 
@@ -2377,7 +2406,7 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
 
     // Activate inlined smi code.
     if (previous_type == BinaryOpIC::UNINITIALIZED) {
-      PatchInlinedSmiCode(ic.address());
+      PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK);
     }
   }
 
@@ -2438,6 +2467,14 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
 }
 
 
+Code* CompareIC::GetRawUninitialized(Token::Value op) {
+  ICCompareStub stub(op, UNINITIALIZED);
+  Code* code = NULL;
+  CHECK(stub.FindCodeInCache(&code));
+  return code;
+}
+
+
 Handle<Code> CompareIC::GetUninitialized(Token::Value op) {
   ICCompareStub stub(op, UNINITIALIZED);
   return stub.GetCode();
@@ -2452,6 +2489,12 @@ CompareIC::State CompareIC::ComputeState(Code* target) {
 }
 
 
+Token::Value CompareIC::ComputeOperation(Code* target) {
+  ASSERT(target->major_key() == CodeStub::CompareIC);
+  return static_cast<Token::Value>(target->compare_operation());
+}
+
+
 const char* CompareIC::GetStateName(State state) {
   switch (state) {
     case UNINITIALIZED: return "UNINITIALIZED";
index 9af865d..3b44abf 100644
@@ -110,16 +110,10 @@ class IC {
   // object that contains this IC site.
   RelocInfo::Mode ComputeMode();
 
-  bool IsQmlGlobal(Handle<Object> receiver) {
-    JSObject* qml_global = isolate_->context()->qml_global();
-    return !qml_global->IsUndefined() && qml_global == *receiver;
-  }
-
   // Returns if this IC is for contextual (no explicit receiver)
   // access to properties.
   bool IsContextual(Handle<Object> receiver) {
-    if (receiver->IsGlobalObject() ||
-        IsQmlGlobal(receiver)) {
+    if (receiver->IsGlobalObject()) {
       return SlowIsContextual();
     } else {
       ASSERT(!SlowIsContextual());
@@ -800,6 +794,9 @@ class CompareIC: public IC {
   // Helper function for determining the state of a compare IC.
   static State ComputeState(Code* target);
 
+  // Helper function for determining the operation a compare IC is for.
+  static Token::Value ComputeOperation(Code* target);
+
   static const char* GetStateName(State state);
 
  private:
@@ -810,7 +807,13 @@ class CompareIC: public IC {
   Condition GetCondition() const { return ComputeCondition(op_); }
   State GetState() { return ComputeState(target()); }
 
+  static Code* GetRawUninitialized(Token::Value op);
+
+  static void Clear(Address address, Code* target);
+
   Token::Value op_;
+
+  friend class IC;
 };
 
 
@@ -823,7 +826,8 @@ class ToBooleanIC: public IC {
 
 
 // Helper for BinaryOpIC and CompareIC.
-void PatchInlinedSmiCode(Address address);
+enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
 
 } }  // namespace v8::internal
 
index 3e3d6c4..2dae6f2 100644
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -100,7 +100,7 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
   int64_t old_bytes_rescanned = bytes_rescanned_;
   bytes_rescanned_ = old_bytes_rescanned + obj_size;
   if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
-    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSize()) {
+    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
       // If we have queued twice the heap size for rescanning then we are
       // going around in circles, scanning the same objects again and again
       // as the program mutates the heap faster than we can incrementally
@@ -118,13 +118,29 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
 
 
 void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
-  WhiteToGrey(obj, mark_bit);
+  Marking::WhiteToGrey(mark_bit);
   marking_deque_.PushGrey(obj);
 }
 
 
-void IncrementalMarking::WhiteToGrey(HeapObject* obj, MarkBit mark_bit) {
-  Marking::WhiteToGrey(mark_bit);
+bool IncrementalMarking::MarkObjectAndPush(HeapObject* obj) {
+  MarkBit mark_bit = Marking::MarkBitFrom(obj);
+  if (!mark_bit.Get()) {
+    WhiteToGreyAndPush(obj, mark_bit);
+    return true;
+  }
+  return false;
+}
+
+
+bool IncrementalMarking::MarkObjectWithoutPush(HeapObject* obj) {
+  MarkBit mark_bit = Marking::MarkBitFrom(obj);
+  if (!mark_bit.Get()) {
+    mark_bit.Set();
+    MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
+    return true;
+  }
+  return false;
 }
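
The two new helpers spell out the tri-colour discipline: MarkObjectAndPush does white-to-grey and queues the object so its fields get scanned later, while MarkObjectWithoutPush effectively goes white-to-black for objects whose fields the caller scans itself, which is why it bumps live bytes immediately instead of deferring to deque draining. Conceptually (sketch only):

    // white = unmarked, grey = marked + queued for field scanning,
    // black = marked + fields accounted for.
    enum Colour { WHITE, GREY, BLACK };

    Colour MarkAndPush(Colour c)     { return c == WHITE ? GREY : c; }
    Colour MarkWithoutPush(Colour c) { return c == WHITE ? BLACK : c; }
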
 
 
index 7bbd521..94afffa 100644
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -42,6 +42,7 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
       state_(STOPPED),
       marking_deque_memory_(NULL),
       marking_deque_memory_committed_(false),
+      marker_(this, heap->mark_compact_collector()),
       steps_count_(0),
       steps_took_(0),
       longest_step_(0.0),
@@ -663,6 +664,22 @@ void IncrementalMarking::Hurry() {
       } else if (map == global_context_map) {
         // Global contexts have weak fields.
         VisitGlobalContext(Context::cast(obj), &marking_visitor);
+      } else if (map->instance_type() == MAP_TYPE) {
+        Map* map = Map::cast(obj);
+        heap_->ClearCacheOnMap(map);
+
+        // When map collection is enabled we have to mark through map's
+        // transitions and back pointers in a special way to make these links
+        // weak.  Only maps for subclasses of JSReceiver can have transitions.
+        STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+        if (FLAG_collect_maps &&
+            map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+          marker_.MarkMapContents(map);
+        } else {
+          marking_visitor.VisitPointers(
+              HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
+              HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
+        }
       } else {
         obj->Iterate(&marking_visitor);
       }
@@ -807,12 +824,6 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
       Map* map = obj->map();
       if (map == filler_map) continue;
 
-      if (obj->IsMap()) {
-        Map* map = Map::cast(obj);
-        heap_->ClearCacheOnMap(map);
-      }
-
-
       int size = obj->SizeFromMap(map);
       bytes_to_process -= size;
       MarkBit map_mark_bit = Marking::MarkBitFrom(map);
@@ -830,6 +841,35 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
         MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
 
         VisitGlobalContext(ctx, &marking_visitor);
+      } else if (map->instance_type() == MAP_TYPE) {
+        Map* map = Map::cast(obj);
+        heap_->ClearCacheOnMap(map);
+
+        // When map collection is enabled we have to mark through map's
+        // transitions and back pointers in a special way to make these links
+        // weak.  Only maps for subclasses of JSReceiver can have transitions.
+        STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+        if (FLAG_collect_maps &&
+            map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+          marker_.MarkMapContents(map);
+        } else {
+          marking_visitor.VisitPointers(
+              HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
+              HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
+        }
+      } else if (map->instance_type() == JS_FUNCTION_TYPE) {
+        marking_visitor.VisitPointers(
+            HeapObject::RawField(obj, JSFunction::kPropertiesOffset),
+            HeapObject::RawField(obj, JSFunction::kCodeEntryOffset));
+
+        marking_visitor.VisitCodeEntry(
+            obj->address() + JSFunction::kCodeEntryOffset);
+
+        marking_visitor.VisitPointers(
+            HeapObject::RawField(obj,
+                                 JSFunction::kCodeEntryOffset + kPointerSize),
+            HeapObject::RawField(obj,
+                                 JSFunction::kNonWeakFieldsEndOffset));
       } else {
         obj->IterateBody(map->instance_type(), size, &marking_visitor);
       }
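
Both Hurry() and Step() now give maps and JSFunctions special visitors instead of a plain IterateBody. For maps the point is weakness: MarkMapContents marks transition and back-pointer links in a way that leaves unused maps collectable when FLAG_collect_maps is on. For JSFunctions the visited spans stop short of the trailing weak field; roughly (offsets assumed from objects.h, where the next-function link sits at kNonWeakFieldsEndOffset):

    // Field spans a JSFunction gets during incremental marking (sketch):
    //   [kPropertiesOffset, kCodeEntryOffset)        strong pointer fields
    //   kCodeEntryOffset                             VisitCodeEntry (code)
    //   (kCodeEntryOffset, kNonWeakFieldsEndOffset)  strong pointer fields
    //   [kNonWeakFieldsEndOffset, ...)               not visited: weak link
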
@@ -938,7 +978,7 @@ void IncrementalMarking::ResetStepCounters() {
 
 
 int64_t IncrementalMarking::SpaceLeftInOldSpace() {
-  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize();
+  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
 }
 
 } }  // namespace v8::internal
index 8cbe6c1..39e8dae 100644
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -154,8 +154,6 @@ class IncrementalMarking {
 
   inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
 
-  inline void WhiteToGrey(HeapObject* obj, MarkBit mark_bit);
-
   // Does white->black or keeps gray or black color. Returns true if converting
   // white to black.
   inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) {
@@ -169,6 +167,16 @@ class IncrementalMarking {
     return true;
   }
 
+  // Marks the object grey and pushes it on the marking stack.
+  // Returns true if object needed marking and false otherwise.
+  // This is for incremental marking only.
+  INLINE(bool MarkObjectAndPush(HeapObject* obj));
+
+  // Marks the object black without pushing it on the marking stack.
+  // Returns true if object needed marking and false otherwise.
+  // This is for incremental marking only.
+  INLINE(bool MarkObjectWithoutPush(HeapObject* obj));
+
   inline int steps_count() {
     return steps_count_;
   }
@@ -260,6 +268,7 @@ class IncrementalMarking {
   VirtualMemory* marking_deque_memory_;
   bool marking_deque_memory_committed_;
   MarkingDeque marking_deque_;
+  Marker<IncrementalMarking> marker_;
 
   int steps_count_;
   double steps_took_;
index e344b86..7836110 100644
@@ -79,7 +79,7 @@ void Interface::DoAdd(
     PrintF("%*sthis = ", Nesting::current(), "");
     this->Print(Nesting::current());
     PrintF("%*s%s : ", Nesting::current(), "",
-           (*reinterpret_cast<String**>(name))->ToAsciiArray());
+           (*static_cast<String**>(name))->ToAsciiArray());
     interface->Print(Nesting::current());
   }
 #endif
@@ -97,7 +97,7 @@ void Interface::DoAdd(
 #ifdef DEBUG
     Nesting nested;
 #endif
-    reinterpret_cast<Interface*>(p->value)->Unify(interface, ok);
+    static_cast<Interface*>(p->value)->Unify(interface, ok);
   }
 
 #ifdef DEBUG
@@ -180,6 +180,15 @@ void Interface::DoUnify(Interface* that, bool* ok) {
     return;
   }
 
+  // Merge instance.
+  if (!that->instance_.is_null()) {
+    if (!this->instance_.is_null() && *this->instance_ != *that->instance_) {
+      *ok = false;
+      return;
+    }
+    this->instance_ = that->instance_;
+  }
+
   // Merge interfaces.
   this->flags_ |= that->flags_;
   that->forward_ = this;
index c2991cb..580f082 100644
@@ -86,6 +86,12 @@ class Interface : public ZoneObject {
     if (*ok) Chase()->flags_ |= MODULE;
   }
 
+  // Set associated instance object.
+  void MakeSingleton(Handle<JSModule> instance, bool* ok) {
+    *ok = IsModule() && Chase()->instance_.is_null();
+    if (*ok) Chase()->instance_ = instance;
+  }
+
   // Do not allow any further refinements, directly or through unification.
   void Freeze(bool* ok) {
     *ok = IsValue() || IsModule();
@@ -95,9 +101,6 @@ class Interface : public ZoneObject {
   // ---------------------------------------------------------------------------
   // Accessors.
 
-  // Look up an exported name. Returns NULL if not (yet) defined.
-  Interface* Lookup(Handle<String> name);
-
   // Check whether this is still a fully undetermined type.
   bool IsUnknown() { return Chase()->flags_ == NONE; }
 
@@ -110,6 +113,42 @@ class Interface : public ZoneObject {
   // Check whether this is closed (i.e. fully determined).
   bool IsFrozen() { return Chase()->flags_ & FROZEN; }
 
+  Handle<JSModule> Instance() { return Chase()->instance_; }
+
+  // Look up an exported name. Returns NULL if not (yet) defined.
+  Interface* Lookup(Handle<String> name);
+
+  // ---------------------------------------------------------------------------
+  // Iterators.
+
+  // Use like:
+  //   for (auto it = interface->iterator(); !it.done(); it.Advance()) {
+  //     ... it.name() ... it.interface() ...
+  //   }
+  class Iterator {
+   public:
+    bool done() const { return entry_ == NULL; }
+    Handle<String> name() const {
+      ASSERT(!done());
+      return Handle<String>(*static_cast<String**>(entry_->key));
+    }
+    Interface* interface() const {
+      ASSERT(!done());
+      return static_cast<Interface*>(entry_->value);
+    }
+    void Advance() { entry_ = exports_->Next(entry_); }
+
+   private:
+    friend class Interface;
+    explicit Iterator(const ZoneHashMap* exports)
+        : exports_(exports), entry_(exports ? exports->Start() : NULL) {}
+
+    const ZoneHashMap* exports_;
+    ZoneHashMap::Entry* entry_;
+  };
+
+  Iterator iterator() const { return Iterator(this->exports_); }
+
   // ---------------------------------------------------------------------------
   // Debugging.
 #ifdef DEBUG
@@ -129,6 +168,7 @@ class Interface : public ZoneObject {
   int flags_;
   Interface* forward_;     // Unification link
   ZoneHashMap* exports_;   // Module exports and their types (allocated lazily)
+  Handle<JSModule> instance_;
 
   explicit Interface(int flags)
     : flags_(flags),
index 535ffb1..0c97abd 100644
@@ -97,7 +97,6 @@ void ThreadLocalTop::InitializeInternal() {
   thread_id_ = ThreadId::Invalid();
   external_caught_exception_ = false;
   failed_access_check_callback_ = NULL;
-  user_object_comparison_callback_ = NULL;
   save_context_ = NULL;
   catcher_ = NULL;
   top_lookup_result_ = NULL;
@@ -744,12 +743,6 @@ void Isolate::SetFailedAccessCheckCallback(
   thread_local_top()->failed_access_check_callback_ = callback;
 }
 
-void Isolate::SetUserObjectComparisonCallback(
-    v8::UserObjectComparisonCallback callback) {
-  thread_local_top()->user_object_comparison_callback_ = callback;
-}
-
 
 void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
   if (!thread_local_top()->failed_access_check_callback_) return;
@@ -1437,6 +1430,7 @@ void Isolate::ThreadDataTable::RemoveAllThreads(Isolate* isolate) {
 
 Isolate::Isolate()
     : state_(UNINITIALIZED),
+      embedder_data_(NULL),
       entry_stack_(NULL),
       stack_trace_nesting_level_(0),
       incomplete_message_(NULL),
@@ -1479,7 +1473,6 @@ Isolate::Isolate()
       string_tracker_(NULL),
       regexp_stack_(NULL),
       date_cache_(NULL),
-      embedder_data_(NULL),
       context_exit_happened_(false) {
   TRACE_ISOLATE(constructor);
 
@@ -1849,6 +1842,9 @@ bool Isolate::Init(Deserializer* des) {
   // stack guard.
   heap_.SetStackLimits();
 
+  // Quiet the heap NaN if needed on target platform.
+  if (des != NULL) Assembler::QuietNaN(heap_.nan_value());
+
   deoptimizer_data_ = new DeoptimizerData;
   runtime_profiler_ = new RuntimeProfiler(this);
   runtime_profiler_->SetUp();
@@ -1861,6 +1857,13 @@ bool Isolate::Init(Deserializer* des) {
     LOG(this, LogCompiledFunctions());
   }
 
+  CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, state_)),
+           Internals::kIsolateStateOffset);
+  CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, embedder_data_)),
+           Internals::kIsolateEmbedderDataOffset);
+  CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.roots_)),
+           Internals::kIsolateRootsOffset);
+
   state_ = INITIALIZED;
   time_millis_at_init_ = OS::TimeCurrentMillis();
   return true;
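
The three CHECK_EQs pin state_, embedder_data_, and heap_.roots_ to the offsets that include/v8.h compiles into embedders; that is what makes the newly inlined API accessors safe, since they read isolate state with raw offset arithmetic rather than a call into the VM. A sketch of that header-side pattern (illustrative; the real constants live in v8::internal::Internals):

    #include <cstdint>

    template <typename T>
    T ReadIsolateField(void* isolate, int byte_offset) {
      // Only valid while Isolate's layout matches the offsets baked into
      // embedders -- exactly what the CHECK_EQs in Isolate::Init() verify.
      return *reinterpret_cast<T*>(
          static_cast<uint8_t*>(isolate) + byte_offset);
    }
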
index 9f964a0..f1c9b3c 100644
@@ -273,9 +273,6 @@ class ThreadLocalTop BASE_EMBEDDED {
   // Head of the list of live LookupResults.
   LookupResult* top_lookup_result_;
 
-  // Call back function for user object comparisons
-  v8::UserObjectComparisonCallback user_object_comparison_callback_;
-
   // Whether out of memory exceptions should be ignored.
   bool ignore_out_of_memory_;
 
@@ -425,7 +422,7 @@ class Isolate {
   enum AddressId {
 #define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
     FOR_EACH_ISOLATE_ADDRESS_NAME(DECLARE_ENUM)
-#undef C
+#undef DECLARE_ENUM
     kIsolateAddressCount
   };
 
@@ -705,11 +702,6 @@ class Isolate {
   void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
   void ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type);
 
-  void SetUserObjectComparisonCallback(v8::UserObjectComparisonCallback callback);
-  inline v8::UserObjectComparisonCallback UserObjectComparisonCallback() { 
-      return thread_local_top()->user_object_comparison_callback_;
-  }
-
   // Exception throwing support. The caller should use the result
   // of Throw() as its return value.
   Failure* Throw(Object* exception, MessageLocation* location = NULL);
@@ -1046,6 +1038,18 @@ class Isolate {
   friend struct GlobalState;
   friend struct InitializeGlobalState;
 
+  enum State {
+    UNINITIALIZED,    // Some components may not have been allocated.
+    INITIALIZED       // All components are fully initialized.
+  };
+
+  // These fields are accessed through the API, offsets must be kept in sync
+  // with v8::internal::Internals (in include/v8.h) constants. This is also
+  // verified in Isolate::Init() using runtime checks.
+  State state_;  // Will be padded to kApiPointerSize.
+  void* embedder_data_;
+  Heap heap_;
+
   // The per-process lock should be acquired before the ThreadDataTable is
   // modified.
   class ThreadDataTable {
@@ -1103,14 +1107,6 @@ class Isolate {
   static void SetIsolateThreadLocals(Isolate* isolate,
                                      PerIsolateThreadData* data);
 
-  enum State {
-    UNINITIALIZED,    // Some components may not have been allocated.
-    INITIALIZED       // All components are fully initialized.
-  };
-
-  State state_;
-  EntryStackItem* entry_stack_;
-
   // Allocate and insert PerIsolateThreadData into the ThreadDataTable
   // (regardless of whether such data already exists).
   PerIsolateThreadData* AllocatePerIsolateThreadData(ThreadId thread_id);
@@ -1154,13 +1150,13 @@ class Isolate {
   // the Error object.
   bool IsErrorObject(Handle<Object> obj);
 
+  EntryStackItem* entry_stack_;
   int stack_trace_nesting_level_;
   StringStream* incomplete_message_;
   // The preallocated memory thread singleton.
   PreallocatedMemoryThread* preallocated_memory_thread_;
   Address isolate_addresses_[kIsolateAddressCount + 1];  // NOLINT
   NoAllocationStringAllocator* preallocated_message_space_;
-
   Bootstrapper* bootstrapper_;
   RuntimeProfiler* runtime_profiler_;
   CompilationCache* compilation_cache_;
@@ -1169,7 +1165,6 @@ class Isolate {
   Mutex* break_access_;
   Atomic32 debugger_initialized_;
   Mutex* debugger_access_;
-  Heap heap_;
   Logger* logger_;
   StackGuard stack_guard_;
   StatsTable* stats_table_;
@@ -1210,11 +1205,8 @@ class Isolate {
   unibrow::Mapping<unibrow::Ecma262Canonicalize>
       regexp_macro_assembler_canonicalize_;
   RegExpStack* regexp_stack_;
-
   DateCache* date_cache_;
-
   unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
-  void* embedder_data_;
 
   // The garbage collector should be a little more aggressive when it knows
   // that a context was recently exited.
index b7d0d30..3455abc 100644
@@ -108,6 +108,30 @@ static inline void ThrowRegExpException(Handle<JSRegExp> re,
 }
 
 
+ContainedInLattice AddRange(ContainedInLattice containment,
+                            const int* ranges,
+                            int ranges_length,
+                            Interval new_range) {
+  ASSERT((ranges_length & 1) == 1);
+  ASSERT(ranges[ranges_length - 1] == String::kMaxUtf16CodeUnit + 1);
+  if (containment == kLatticeUnknown) return containment;
+  bool inside = false;
+  int last = 0;
+  for (int i = 0; i < ranges_length; inside = !inside, last = ranges[i], i++) {
+    // Consider the range from last to ranges[i].
+    // We haven't got to the new range yet.
+    if (ranges[i] <= new_range.from()) continue;
+    // New range is wholly inside last-ranges[i].  Note that new_range.to() is
+    // inclusive, but the values in ranges are not.
+    if (last <= new_range.from() && new_range.to() < ranges[i]) {
+      return Combine(containment, inside ? kLatticeIn : kLatticeOut);
+    }
+    return kLatticeUnknown;
+  }
+  return containment;
+}
+
+
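
AddRange folds an interval into a small containment lattice against an alternating outside/inside boundary table (odd length, end-exclusive bounds, terminated by kMaxUtf16CodeUnit + 1). Worked example, assuming the lattice values from jsregexp.h (kNotYet, kLatticeIn, kLatticeOut, kLatticeUnknown, with Combine as bitwise OR) and a word-character table along these lines:

    // Inside ranges: ['0','9'], ['A','Z'], ['_'], ['a','z'].
    const int kWordRanges[] = {'0', '9' + 1, 'A', 'Z' + 1, '_', '_' + 1,
                               'a', 'z' + 1, 0x10000};
    // AddRange(kNotYet, kWordRanges, 9, Interval('a', 'f')) == kLatticeIn
    // AddRange(kNotYet, kWordRanges, 9, Interval(' ', ' ')) == kLatticeOut
    // AddRange(kNotYet, kWordRanges, 9, Interval('_', 'a')) == kLatticeUnknown
    //   (the interval straddles an inside/outside boundary, so there is
    //    no single answer)
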
 // More makes code generation slower, less makes V8 benchmark score lower.
 const int kMaxLookaheadForBoyerMoore = 8;
 // In a 3-character pattern you can maximally step forwards 3 characters
@@ -2157,6 +2181,7 @@ void ActionNode::FillInBMInfo(int offset,
   } else if (type_ != POSITIVE_SUBMATCH_SUCCESS) {
     on_success()->FillInBMInfo(offset, bm, not_at_start);
   }
+  SaveBMInfo(bm, not_at_start, offset);
 }
 
 
@@ -2181,6 +2206,7 @@ void AssertionNode::FillInBMInfo(
   // Match the behaviour of EatsAtLeast on this node.
   if (type() == AT_START && not_at_start) return;
   on_success()->FillInBMInfo(offset, bm, not_at_start);
+  SaveBMInfo(bm, not_at_start, offset);
 }
 
 
@@ -2522,10 +2548,12 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
     }
   }
   ASSERT(characters_filled_in != details->characters());
-  on_success()-> GetQuickCheckDetails(details,
-                                      compiler,
-                                      characters_filled_in,
-                                      true);
+  if (!details->cannot_match()) {
+    on_success()-> GetQuickCheckDetails(details,
+                                        compiler,
+                                        characters_filled_in,
+                                        true);
+  }
 }
 
 
@@ -2603,6 +2631,148 @@ class VisitMarker {
 };
 
 
+RegExpNode* SeqRegExpNode::FilterASCII(int depth) {
+  if (info()->replacement_calculated) return replacement();
+  if (depth < 0) return this;
+  ASSERT(!info()->visited);
+  VisitMarker marker(info());
+  return FilterSuccessor(depth - 1);
+}
+
+
+RegExpNode* SeqRegExpNode::FilterSuccessor(int depth) {
+  RegExpNode* next = on_success_->FilterASCII(depth - 1);
+  if (next == NULL) return set_replacement(NULL);
+  on_success_ = next;
+  return set_replacement(this);
+}
+
+
+RegExpNode* TextNode::FilterASCII(int depth) {
+  if (info()->replacement_calculated) return replacement();
+  if (depth < 0) return this;
+  ASSERT(!info()->visited);
+  VisitMarker marker(info());
+  int element_count = elms_->length();
+  for (int i = 0; i < element_count; i++) {
+    TextElement elm = elms_->at(i);
+    if (elm.type == TextElement::ATOM) {
+      Vector<const uc16> quarks = elm.data.u_atom->data();
+      for (int j = 0; j < quarks.length(); j++) {
+        // We don't need special handling for case independence
+        // because of the rule that case independence cannot make
+        // a non-ASCII character match an ASCII character.
+        if (quarks[j] > String::kMaxAsciiCharCode) {
+          return set_replacement(NULL);
+        }
+      }
+    } else {
+      ASSERT(elm.type == TextElement::CHAR_CLASS);
+      RegExpCharacterClass* cc = elm.data.u_char_class;
+      ZoneList<CharacterRange>* ranges = cc->ranges();
+      if (!CharacterRange::IsCanonical(ranges)) {
+        CharacterRange::Canonicalize(ranges);
+      }
+      // Now they are in order so we only need to look at the first.
+      int range_count = ranges->length();
+      if (cc->is_negated()) {
+        if (range_count != 0 &&
+            ranges->at(0).from() == 0 &&
+            ranges->at(0).to() >= String::kMaxAsciiCharCode) {
+          return set_replacement(NULL);
+        }
+      } else {
+        if (range_count == 0 ||
+            ranges->at(0).from() > String::kMaxAsciiCharCode) {
+          return set_replacement(NULL);
+        }
+      }
+    }
+  }
+  return FilterSuccessor(depth - 1);
+}
+
+
+RegExpNode* LoopChoiceNode::FilterASCII(int depth) {
+  if (info()->replacement_calculated) return replacement();
+  if (depth < 0) return this;
+  if (info()->visited) return this;
+  {
+    VisitMarker marker(info());
+
+    RegExpNode* continue_replacement = continue_node_->FilterASCII(depth - 1);
+    // If we can't continue after the loop then there is no sense in doing the
+    // loop.
+    if (continue_replacement == NULL) return set_replacement(NULL);
+  }
+
+  return ChoiceNode::FilterASCII(depth - 1);
+}
+
+
+RegExpNode* ChoiceNode::FilterASCII(int depth) {
+  if (info()->replacement_calculated) return replacement();
+  if (depth < 0) return this;
+  if (info()->visited) return this;
+  VisitMarker marker(info());
+  int choice_count = alternatives_->length();
+  int surviving = 0;
+  RegExpNode* survivor = NULL;
+  for (int i = 0; i < choice_count; i++) {
+    GuardedAlternative alternative = alternatives_->at(i);
+    RegExpNode* replacement = alternative.node()->FilterASCII(depth - 1);
+    ASSERT(replacement != this);  // No missing EMPTY_MATCH_CHECK.
+    if (replacement != NULL) {
+      alternatives_->at(i).set_node(replacement);
+      surviving++;
+      survivor = replacement;
+    }
+  }
+  if (surviving < 2) return set_replacement(survivor);
+
+  set_replacement(this);
+  if (surviving == choice_count) {
+    return this;
+  }
+  // Only some of the nodes survived the filtering.  We need to rebuild the
+  // alternatives list.
+  ZoneList<GuardedAlternative>* new_alternatives =
+      new ZoneList<GuardedAlternative>(surviving);
+  for (int i = 0; i < choice_count; i++) {
+    RegExpNode* replacement =
+        alternatives_->at(i).node()->FilterASCII(depth - 1);
+    if (replacement != NULL) {
+      alternatives_->at(i).set_node(replacement);
+      new_alternatives->Add(alternatives_->at(i));
+    }
+  }
+  alternatives_ = new_alternatives;
+  return this;
+}
+
+
+RegExpNode* NegativeLookaheadChoiceNode::FilterASCII(int depth) {
+  if (info()->replacement_calculated) return replacement();
+  if (depth < 0) return this;
+  if (info()->visited) return this;
+  VisitMarker marker(info());
+  // Alternative 0 is the negative lookahead; alternative 1 is what comes
+  // afterwards.
+  RegExpNode* node = alternatives_->at(1).node();
+  RegExpNode* replacement = node->FilterASCII(depth - 1);
+  if (replacement == NULL) return set_replacement(NULL);
+  alternatives_->at(1).set_node(replacement);
+
+  RegExpNode* neg_node = alternatives_->at(0).node();
+  RegExpNode* neg_replacement = neg_node->FilterASCII(depth - 1);
+  // If the negative lookahead is always going to fail then
+  // we don't need to check it.
+  if (neg_replacement == NULL) return set_replacement(replacement);
+  alternatives_->at(0).set_node(neg_replacement);
+  return set_replacement(this);
+}
+
+
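
The FilterASCII pass prunes graph nodes that cannot possibly match once the subject string is known to be ASCII: text nodes containing a quark above String::kMaxAsciiCharCode (and character classes that cannot match any ASCII character) report NULL, and choice nodes rebuild their alternative lists from the survivors, splicing themselves out when at most one remains. Illustrative effect on /foo|日本|bar/ (sketch, not actual compiler output):

    // TextNode("foo")   -> survives
    // TextNode("日本")  -> NULL: quarks exceed kMaxAsciiCharCode
    // TextNode("bar")   -> survives
    // ChoiceNode        -> keeps {foo, bar}; with a single survivor it
    //                      would return set_replacement(survivor) and
    //                      vanish from the graph.
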
 void LoopChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
                                           RegExpCompiler* compiler,
                                           int characters_filled_in,
@@ -2617,12 +2787,14 @@ void LoopChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
 
 
 void LoopChoiceNode::FillInBMInfo(
-    int offset, BoyerMooreLookahead* bm, bool nas) {
+    int offset, BoyerMooreLookahead* bm, bool not_at_start) {
   if (body_can_be_zero_length_) {
     bm->SetRest(offset);
+    SaveBMInfo(bm, not_at_start, offset);
     return;
   }
-  ChoiceNode::FillInBMInfo(offset, bm, nas);
+  ChoiceNode::FillInBMInfo(offset, bm, not_at_start);
+  SaveBMInfo(bm, not_at_start, offset);
 }
 
 
@@ -2710,110 +2882,83 @@ static void EmitHat(RegExpCompiler* compiler,
 }
 
 
-// Emit the code to handle \b and \B (word-boundary or non-word-boundary)
-// when we know whether the next character must be a word character or not.
-static void EmitHalfBoundaryCheck(AssertionNode::AssertionNodeType type,
-                                  RegExpCompiler* compiler,
-                                  RegExpNode* on_success,
-                                  Trace* trace) {
+// Emit the code to handle \b and \B (word-boundary or non-word-boundary).
+void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
   RegExpMacroAssembler* assembler = compiler->macro_assembler();
-  Label done;
-
-  Trace new_trace(*trace);
-
-  bool expect_word_character = (type == AssertionNode::AFTER_WORD_CHARACTER);
-  Label* on_word = expect_word_character ? &done : new_trace.backtrack();
-  Label* on_non_word = expect_word_character ? new_trace.backtrack() : &done;
-
-  // Check whether previous character was a word character.
-  switch (trace->at_start()) {
-    case Trace::TRUE:
-      if (expect_word_character) {
-        assembler->GoTo(on_non_word);
-      }
-      break;
-    case Trace::UNKNOWN:
-      ASSERT_EQ(0, trace->cp_offset());
-      assembler->CheckAtStart(on_non_word);
-      // Fall through.
-    case Trace::FALSE:
-      int prev_char_offset = trace->cp_offset() - 1;
-      assembler->LoadCurrentCharacter(prev_char_offset, NULL, false, 1);
-      EmitWordCheck(assembler, on_word, on_non_word, expect_word_character);
-      // We may or may not have loaded the previous character.
-      new_trace.InvalidateCurrentCharacter();
+  Trace::TriBool next_is_word_character = Trace::UNKNOWN;
+  bool not_at_start = (trace->at_start() == Trace::FALSE);
+  BoyerMooreLookahead* lookahead = bm_info(not_at_start);
+  if (lookahead == NULL) {
+    int eats_at_least =
+        Min(kMaxLookaheadForBoyerMoore,
+            EatsAtLeast(kMaxLookaheadForBoyerMoore, 0, not_at_start));
+    if (eats_at_least >= 1) {
+      BoyerMooreLookahead* bm =
+          new BoyerMooreLookahead(eats_at_least, compiler);
+      FillInBMInfo(0, bm, not_at_start);
+      if (bm->at(0)->is_non_word()) next_is_word_character = Trace::FALSE;
+      if (bm->at(0)->is_word()) next_is_word_character = Trace::TRUE;
+    }
+  } else {
+    if (lookahead->at(0)->is_non_word()) next_is_word_character = Trace::FALSE;
+    if (lookahead->at(0)->is_word()) next_is_word_character = Trace::TRUE;
+  }
+  bool at_boundary = (type_ == AssertionNode::AT_BOUNDARY);
+  if (next_is_word_character == Trace::UNKNOWN) {
+    Label before_non_word;
+    Label before_word;
+    if (trace->characters_preloaded() != 1) {
+      assembler->LoadCurrentCharacter(trace->cp_offset(), &before_non_word);
+    }
+    // Fall through on non-word.
+    EmitWordCheck(assembler, &before_word, &before_non_word, false);
+    // Next character is not a word character.
+    assembler->Bind(&before_non_word);
+    Label ok;
+    BacktrackIfPrevious(compiler, trace, at_boundary ? kIsNonWord : kIsWord);
+    assembler->GoTo(&ok);
+
+    assembler->Bind(&before_word);
+    BacktrackIfPrevious(compiler, trace, at_boundary ? kIsWord : kIsNonWord);
+    assembler->Bind(&ok);
+  } else if (next_is_word_character == Trace::TRUE) {
+    BacktrackIfPrevious(compiler, trace, at_boundary ? kIsWord : kIsNonWord);
+  } else {
+    ASSERT(next_is_word_character == Trace::FALSE);
+    BacktrackIfPrevious(compiler, trace, at_boundary ? kIsNonWord : kIsWord);
   }
-
-  assembler->Bind(&done);
-
-  on_success->Emit(compiler, &new_trace);
 }
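The boundary emitter above first tries to classify the character after the current position from Boyer-Moore lookahead info: if position 0 of the map can only hold word characters the tri-state is TRUE, if it can only hold non-word characters it is FALSE, otherwise UNKNOWN and a run-time test is emitted. A compact model of that classification (names illustrative only):

    enum TriBool { kFalse, kTrue, kUnknown };

    // is_word / is_non_word mirror the two map queries used above:
    // "every reachable character at offset 0 is (non-)word".
    TriBool ClassifyNextCharacter(bool is_word, bool is_non_word) {
      if (is_non_word) return kFalse;  // next char is definitely not word
      if (is_word) return kTrue;       // next char is definitely word
      return kUnknown;                 // must test at run time
    }

    int main() { return ClassifyNextCharacter(false, false) == kUnknown ? 0 : 1; }
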
 
 
-// Emit the code to handle \b and \B (word-boundary or non-word-boundary).
-static void EmitBoundaryCheck(AssertionNode::AssertionNodeType type,
-                              RegExpCompiler* compiler,
-                              RegExpNode* on_success,
-                              Trace* trace) {
+void AssertionNode::BacktrackIfPrevious(
+    RegExpCompiler* compiler,
+    Trace* trace,
+    AssertionNode::IfPrevious backtrack_if_previous) {
   RegExpMacroAssembler* assembler = compiler->macro_assembler();
-  Label before_non_word;
-  Label before_word;
-  if (trace->characters_preloaded() != 1) {
-    assembler->LoadCurrentCharacter(trace->cp_offset(), &before_non_word);
-  }
-  // Fall through on non-word.
-  EmitWordCheck(assembler, &before_word, &before_non_word, false);
-
-  // We will be loading the previous character into the current character
-  // register.
   Trace new_trace(*trace);
   new_trace.InvalidateCurrentCharacter();
 
-  Label ok;
-  Label* boundary;
-  Label* not_boundary;
-  if (type == AssertionNode::AT_BOUNDARY) {
-    boundary = &ok;
-    not_boundary = new_trace.backtrack();
-  } else {
-    not_boundary = &ok;
-    boundary = new_trace.backtrack();
-  }
+  Label fall_through, dummy;
 
-  // Next character is not a word character.
-  assembler->Bind(&before_non_word);
-  if (new_trace.cp_offset() == 0) {
-    // The start of input counts as a non-word character, so the question is
-    // decided if we are at the start.
-    assembler->CheckAtStart(not_boundary);
-  }
-  // We already checked that we are not at the start of input so it must be
-  // OK to load the previous character.
-  assembler->LoadCurrentCharacter(new_trace.cp_offset() - 1,
-                                  &ok,  // Unused dummy label in this call.
-                                  false);
-  // Fall through on non-word.
-  EmitWordCheck(assembler, boundary, not_boundary, false);
-  assembler->GoTo(not_boundary);
+  Label* non_word = backtrack_if_previous == kIsNonWord ?
+                    new_trace.backtrack() :
+                    &fall_through;
+  Label* word = backtrack_if_previous == kIsNonWord ?
+                &fall_through :
+                new_trace.backtrack();
 
-  // Next character is a word character.
-  assembler->Bind(&before_word);
   if (new_trace.cp_offset() == 0) {
     // The start of input counts as a non-word character, so the question is
     // decided if we are at the start.
-    assembler->CheckAtStart(boundary);
+    assembler->CheckAtStart(non_word);
   }
   // We already checked that we are not at the start of input so it must be
   // OK to load the previous character.
-  assembler->LoadCurrentCharacter(new_trace.cp_offset() - 1,
-                                  &ok,  // Unused dummy label in this call.
-                                  false);
-  bool fall_through_on_word = (type == AssertionNode::AT_NON_BOUNDARY);
-  EmitWordCheck(assembler, not_boundary, boundary, fall_through_on_word);
-
-  assembler->Bind(&ok);
+  assembler->LoadCurrentCharacter(new_trace.cp_offset() - 1, &dummy, false);
+  EmitWordCheck(assembler, word, non_word, backtrack_if_previous == kIsNonWord);
 
-  on_success->Emit(compiler, &new_trace);
+  assembler->Bind(&fall_through);
+  on_success()->Emit(compiler, &new_trace);
 }
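Which previous-character class forces a backtrack follows directly from the assertion type and the class of the next character: \b needs the two to differ, \B needs them to agree. As a one-function truth table reproducing the three call sites in EmitBoundaryCheck:

    enum IfPrevious { kIsNonWord, kIsWord };

    // For AT_BOUNDARY (\b) we backtrack when the previous character has the
    // same word-ness as the next one; for AT_NON_BOUNDARY (\B), when it
    // differs.
    IfPrevious BacktrackClass(bool at_boundary, bool next_is_word) {
      if (at_boundary) return next_is_word ? kIsWord : kIsNonWord;
      return next_is_word ? kIsNonWord : kIsWord;
    }

    int main() { return BacktrackClass(true, true) == kIsWord ? 0 : 1; }
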
 
 
@@ -2861,13 +3006,9 @@ void AssertionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
       return;
     case AT_BOUNDARY:
     case AT_NON_BOUNDARY: {
-      EmitBoundaryCheck(type_, compiler, on_success(), trace);
+      EmitBoundaryCheck(compiler, trace);
       return;
     }
-    case AFTER_WORD_CHARACTER:
-    case AFTER_NONWORD_CHARACTER: {
-      EmitHalfBoundaryCheck(type_, compiler, on_success(), trace);
-    }
   }
   on_success()->Emit(compiler, trace);
 }
@@ -3277,24 +3418,74 @@ class AlternativeGenerationList {
 };
 
 
+// The '2' variant has an inclusive 'from' and an exclusive 'to'.
+static const int kSpaceRanges[] = { '\t', '\r' + 1, ' ', ' ' + 1, 0x00A0,
+    0x00A1, 0x1680, 0x1681, 0x180E, 0x180F, 0x2000, 0x200B, 0x2028, 0x202A,
+    0x202F, 0x2030, 0x205F, 0x2060, 0x3000, 0x3001, 0xFEFF, 0xFF00, 0x10000 };
+static const int kSpaceRangeCount = ARRAY_SIZE(kSpaceRanges);
+
+static const int kWordRanges[] = {
+    '0', '9' + 1, 'A', 'Z' + 1, '_', '_' + 1, 'a', 'z' + 1, 0x10000 };
+static const int kWordRangeCount = ARRAY_SIZE(kWordRanges);
+static const int kDigitRanges[] = { '0', '9' + 1, 0x10000 };
+static const int kDigitRangeCount = ARRAY_SIZE(kDigitRanges);
+static const int kSurrogateRanges[] = { 0xd800, 0xe000, 0x10000 };
+static const int kSurrogateRangeCount = ARRAY_SIZE(kSurrogateRanges);
+static const int kLineTerminatorRanges[] = { 0x000A, 0x000B, 0x000D, 0x000E,
+    0x2028, 0x202A, 0x10000 };
+static const int kLineTerminatorRangeCount = ARRAY_SIZE(kLineTerminatorRanges);
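These tables use a new convention: each pair is half-open ([from, to)) and the list is terminated by a 0x10000 sentinel, one past the last BMP code unit. A minimal sketch of converting such a table back to the inclusive CharacterRange form used elsewhere, mirroring what AddClass does further down in this patch:

    #include <cassert>
    #include <utility>
    #include <vector>

    static std::vector<std::pair<int, int>> ToInclusive(const int* elmv,
                                                        int elmc) {
      elmc--;                          // drop the 0x10000 sentinel
      assert(elmv[elmc] == 0x10000);
      std::vector<std::pair<int, int>> ranges;
      for (int i = 0; i < elmc; i += 2)
        ranges.push_back({elmv[i], elmv[i + 1] - 1});  // exclusive -> inclusive
      return ranges;
    }

    int main() {
      static const int kDigits[] = { '0', '9' + 1, 0x10000 };
      assert(ToInclusive(kDigits, 3) ==
             (std::vector<std::pair<int, int>>{{'0', '9'}}));
    }
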
+
+
+void BoyerMoorePositionInfo::Set(int character) {
+  SetInterval(Interval(character, character));
+}
+
+
+void BoyerMoorePositionInfo::SetInterval(const Interval& interval) {
+  s_ = AddRange(s_, kSpaceRanges, kSpaceRangeCount, interval);
+  w_ = AddRange(w_, kWordRanges, kWordRangeCount, interval);
+  d_ = AddRange(d_, kDigitRanges, kDigitRangeCount, interval);
+  surrogate_ =
+      AddRange(surrogate_, kSurrogateRanges, kSurrogateRangeCount, interval);
+  if (interval.to() - interval.from() >= kMapSize - 1) {
+    if (map_count_ != kMapSize) {
+      map_count_ = kMapSize;
+      for (int i = 0; i < kMapSize; i++) map_->at(i) = true;
+    }
+    return;
+  }
+  for (int i = interval.from(); i <= interval.to(); i++) {
+    int mod_character = (i & kMask);
+    if (!map_->at(mod_character)) {
+      map_count_++;
+      map_->at(mod_character) = true;
+    }
+    if (map_count_ == kMapSize) return;
+  }
+}
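The per-position bitmap is kept modulo kMapSize (128, matching RegExpMacroAssembler::kTableSize, as the STATIC_ASSERT later in this patch checks): intervals at least as wide as the map saturate it, narrower ones set bits index by index. A freestanding model of just that update rule:

    #include <bitset>
    #include <cassert>

    const int kMapSize = 128;           // == RegExpMacroAssembler::kTableSize
    const int kMask = kMapSize - 1;

    void SetInterval(std::bitset<kMapSize>* map, int from, int to) {
      if (to - from >= kMapSize - 1) {  // interval covers every residue
        map->set();
        return;
      }
      for (int i = from; i <= to; i++) map->set(i & kMask);
    }

    int main() {
      std::bitset<kMapSize> map;
      SetInterval(&map, 0x2028, 0x2029);   // U+2028..U+2029, taken mod 128
      assert(map.count() == 2 && map[0x2028 & kMask]);
    }
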
+
+
+void BoyerMoorePositionInfo::SetAll() {
+  s_ = w_ = d_ = kLatticeUnknown;
+  if (map_count_ != kMapSize) {
+    map_count_ = kMapSize;
+    for (int i = 0; i < kMapSize; i++) map_->at(i) = true;
+  }
+}
+
+
 BoyerMooreLookahead::BoyerMooreLookahead(
-    int length, int map_length, RegExpCompiler* compiler)
+    int length, RegExpCompiler* compiler)
     : length_(length),
-      map_length_(map_length),
       compiler_(compiler) {
-  ASSERT(IsPowerOf2(map_length));
   if (compiler->ascii()) {
     max_char_ = String::kMaxAsciiCharCode;
   } else {
     max_char_ = String::kMaxUtf16CodeUnit;
   }
-  bitmaps_ = new ZoneList<ZoneList<bool>*>(length);
+  bitmaps_ = new ZoneList<BoyerMoorePositionInfo*>(length);
   for (int i = 0; i < length; i++) {
-    bitmaps_->Add(new ZoneList<bool>(map_length));
-    ZoneList<bool>* map = bitmaps_->at(i);
-    for (int i = 0; i < map_length; i++) {
-      map->Add(false);
-    }
+    bitmaps_->Add(new BoyerMoorePositionInfo());
   }
 }
 
@@ -3304,8 +3495,11 @@ BoyerMooreLookahead::BoyerMooreLookahead(
 // different parameters at once this is a tradeoff.
 bool BoyerMooreLookahead::FindWorthwhileInterval(int* from, int* to) {
   int biggest_points = 0;
+  // If more than 32 characters out of 128 can occur, it is unlikely that we
+  // can be lucky enough to step forwards much of the time.
+  const int kMaxMax = 32;
   for (int max_number_of_chars = 4;
-       max_number_of_chars < kTooManyCharacters;
+       max_number_of_chars < kMaxMax;
        max_number_of_chars *= 2) {
     biggest_points =
         FindBestInterval(max_number_of_chars, biggest_points, from, to);
@@ -3332,7 +3526,7 @@ int BoyerMooreLookahead::FindBestInterval(
     bool union_map[kSize];
     for (int j = 0; j < kSize; j++) union_map[j] = false;
     while (i < length_ && Count(i) <= max_number_of_chars) {
-      ZoneList<bool>* map = bitmaps_->at(i);
+      BoyerMoorePositionInfo* map = bitmaps_->at(i);
       for (int j = 0; j < kSize; j++) union_map[j] |= map->at(j);
       i++;
     }
@@ -3387,8 +3581,8 @@ int BoyerMooreLookahead::GetSkipTable(int min_lookahead,
   int skip = max_lookahead + 1 - min_lookahead;
 
   for (int i = max_lookahead; i >= min_lookahead; i--) {
-    ZoneList<bool>* map = bitmaps_->at(i);
-    for (int j = 0; j < map_length_; j++) {
+    BoyerMoorePositionInfo* map = bitmaps_->at(i);
+    for (int j = 0; j < kSize; j++) {
       if (map->at(j)) {
         boolean_skip_table->set(j, kDontSkipArrayEntry);
       }
@@ -3401,29 +3595,29 @@ int BoyerMooreLookahead::GetSkipTable(int min_lookahead,
 
 // See comment above on the implementation of GetSkipTable.
 bool BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
+  const int kSize = RegExpMacroAssembler::kTableSize;
+
   int min_lookahead = 0;
   int max_lookahead = 0;
 
   if (!FindWorthwhileInterval(&min_lookahead, &max_lookahead)) return false;
 
   bool found_single_character = false;
-  bool abandoned_search_for_single_character = false;
   int single_character = 0;
   for (int i = max_lookahead; i >= min_lookahead; i--) {
-    ZoneList<bool>* map = bitmaps_->at(i);
-    for (int j = 0; j < map_length_; j++) {
+    BoyerMoorePositionInfo* map = bitmaps_->at(i);
+    if (map->map_count() > 1 ||
+        (found_single_character && map->map_count() != 0)) {
+      found_single_character = false;
+      break;
+    }
+    for (int j = 0; j < kSize; j++) {
       if (map->at(j)) {
-        if (found_single_character) {
-          found_single_character = false;  // Found two.
-          abandoned_search_for_single_character = true;
-          break;
-        } else {
-          found_single_character = true;
-          single_character = j;
-        }
+        found_single_character = true;
+        single_character = j;
+        break;
       }
     }
-    if (abandoned_search_for_single_character) break;
   }
 
   int lookahead_width = max_lookahead + 1 - min_lookahead;
@@ -3437,8 +3631,7 @@ bool BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
     Label cont, again;
     masm->Bind(&again);
     masm->LoadCurrentCharacter(max_lookahead, &cont, true);
-    if (max_char_ > map_length_) {
-      ASSERT(map_length_ == RegExpMacroAssembler::kTableSize);
+    if (max_char_ > kSize) {
       masm->CheckCharacterAfterAnd(single_character,
                                    RegExpMacroAssembler::kTableMask,
                                    &cont);
@@ -3452,7 +3645,7 @@ bool BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
   }
 
   Handle<ByteArray> boolean_skip_table =
-      FACTORY->NewByteArray(map_length_, TENURED);
+      FACTORY->NewByteArray(kSize, TENURED);
   int skip_distance = GetSkipTable(
       min_lookahead, max_lookahead, boolean_skip_table);
   ASSERT(skip_distance != 0);
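Once the table is built, the emitted loop peeks max_lookahead characters ahead and skips forward whenever the table says that character cannot occur anywhere in the chosen window. A host-side sketch of the same loop (a hypothetical helper, not the generated code):

    #include <string>

    // skip_table[c & 0x7f] is true for characters that *can* occur in the
    // window (kDontSkipArrayEntry); everything else allows a jump.
    int FirstPlausiblePosition(const std::string& subject,
                               const bool (&skip_table)[128],
                               int max_lookahead, int skip_distance) {
      int pos = 0;
      while (pos + max_lookahead < static_cast<int>(subject.size())) {
        unsigned char c = subject[pos + max_lookahead];
        if (skip_table[c & 0x7f]) return pos;  // hand over to the real matcher
        pos += skip_distance;                  // character can't occur here
      }
      return pos;  // too close to the end for the lookahead; match normally
    }

    int main() {
      bool table[128] = {};          // all-skip table...
      table['o' & 0x7f] = true;      // ...except 'o' can occur in the window
      return FirstPlausiblePosition("xxfoo", table, 2, 2) == 2 ? 0 : 1;
    }
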
@@ -3631,16 +3824,20 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
         // not be atoms, they can be any reasonably limited character class or
         // small alternation.
         ASSERT(trace->is_trivial());  // This is the case on LoopChoiceNodes.
-        eats_at_least =
-            Min(kMaxLookaheadForBoyerMoore,
-                EatsAtLeast(kMaxLookaheadForBoyerMoore, 0, not_at_start));
-        if (eats_at_least >= 1) {
-          BoyerMooreLookahead bm(eats_at_least,
-                                 RegExpMacroAssembler::kTableSize,
-                                 compiler);
-          GuardedAlternative alt0 = alternatives_->at(0);
-          alt0.node()->FillInBMInfo(0, &bm, not_at_start);
-          skip_was_emitted = bm.EmitSkipInstructions(macro_assembler);
+        BoyerMooreLookahead* lookahead = bm_info(not_at_start);
+        if (lookahead == NULL) {
+          eats_at_least =
+              Min(kMaxLookaheadForBoyerMoore,
+                  EatsAtLeast(kMaxLookaheadForBoyerMoore, 0, not_at_start));
+          if (eats_at_least >= 1) {
+            BoyerMooreLookahead* bm =
+                new BoyerMooreLookahead(eats_at_least, compiler);
+            GuardedAlternative alt0 = alternatives_->at(0);
+            alt0.node()->FillInBMInfo(0, bm, not_at_start);
+            skip_was_emitted = bm->EmitSkipInstructions(macro_assembler);
+          }
+        } else {
+          skip_was_emitted = lookahead->EmitSkipInstructions(macro_assembler);
         }
       }
     }
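Emit now consults a per-node cache before building a lookahead: two slots, keyed by not_at_start, filled as a side effect of FillInBMInfo (via SaveBMInfo). The cache in isolation, with a hypothetical BM stand-in for BoyerMooreLookahead:

    #include <cassert>

    struct BM { int eats_at_least; };   // stand-in for BoyerMooreLookahead

    struct BMCache {
      BM* slots[2] = {nullptr, nullptr};
      BM* get(bool not_at_start) { return slots[not_at_start ? 1 : 0]; }
      void set(bool not_at_start, BM* bm) { slots[not_at_start ? 1 : 0] = bm; }
    };

    int main() {
      BMCache cache;
      BM bm = {3};
      assert(cache.get(true) == nullptr);  // first emit: build and store
      cache.set(true, &bm);
      assert(cache.get(true) == &bm);      // later emits reuse the same info
    }
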
@@ -4203,12 +4400,6 @@ void DotPrinter::VisitAssertion(AssertionNode* that) {
     case AssertionNode::AFTER_NEWLINE:
       stream()->Add("label=\"(?<=\\n)\", shape=septagon");
       break;
-    case AssertionNode::AFTER_WORD_CHARACTER:
-      stream()->Add("label=\"(?<=\\w)\", shape=septagon");
-      break;
-    case AssertionNode::AFTER_NONWORD_CHARACTER:
-      stream()->Add("label=\"(?<=\\W)\", shape=septagon");
-      break;
   }
   stream()->Add("];\n");
   PrintAttributes(that);
@@ -4313,21 +4504,6 @@ void RegExpEngine::DotPrint(const char* label,
 // -------------------------------------------------------------------
 // Tree to graph conversion
 
-static const uc16 kSpaceRanges[] = { 0x0009, 0x000D, 0x0020, 0x0020, 0x00A0,
-    0x00A0, 0x1680, 0x1680, 0x180E, 0x180E, 0x2000, 0x200A, 0x2028, 0x2029,
-    0x202F, 0x202F, 0x205F, 0x205F, 0x3000, 0x3000, 0xFEFF, 0xFEFF };
-static const int kSpaceRangeCount = ARRAY_SIZE(kSpaceRanges);
-
-static const uc16 kWordRanges[] = { '0', '9', 'A', 'Z', '_', '_', 'a', 'z' };
-static const int kWordRangeCount = ARRAY_SIZE(kWordRanges);
-
-static const uc16 kDigitRanges[] = { '0', '9' };
-static const int kDigitRangeCount = ARRAY_SIZE(kDigitRanges);
-
-static const uc16 kLineTerminatorRanges[] = { 0x000A, 0x000A, 0x000D, 0x000D,
-    0x2028, 0x2029 };
-static const int kLineTerminatorRangeCount = ARRAY_SIZE(kLineTerminatorRanges);
-
 RegExpNode* RegExpAtom::ToNode(RegExpCompiler* compiler,
                                RegExpNode* on_success) {
   ZoneList<TextElement>* elms = new ZoneList<TextElement>(1);
@@ -4341,9 +4517,12 @@ RegExpNode* RegExpText::ToNode(RegExpCompiler* compiler,
   return new TextNode(elements(), on_success);
 }
 
+
 static bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
-                                 const uc16* special_class,
+                                 const int* special_class,
                                  int length) {
+  length--;  // Remove final 0x10000.
+  ASSERT(special_class[length] == 0x10000);
   ASSERT(ranges->length() != 0);
   ASSERT(length != 0);
   ASSERT(special_class[0] != 0);
@@ -4359,7 +4538,7 @@ static bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
       return false;
     }
     range = ranges->at((i >> 1) + 1);
-    if (special_class[i+1] != range.from() - 1) {
+    if (special_class[i+1] != range.from()) {
       return false;
     }
   }
@@ -4371,14 +4550,17 @@ static bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
 
 
 static bool CompareRanges(ZoneList<CharacterRange>* ranges,
-                          const uc16* special_class,
+                          const int* special_class,
                           int length) {
+  length--;  // Remove final 0x10000.
+  ASSERT(special_class[length] == 0x10000);
   if (ranges->length() * 2 != length) {
     return false;
   }
   for (int i = 0; i < length; i += 2) {
     CharacterRange range = ranges->at(i >> 1);
-    if (range.from() != special_class[i] || range.to() != special_class[i+1]) {
+    if (range.from() != special_class[i] ||
+        range.to() != special_class[i + 1] - 1) {
       return false;
     }
   }
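A worked instance of the two adjusted comparisons, using the \d table for illustration: with an exclusive 'to', a matching inclusive range must end at special_class[i + 1] - 1, and in the inverse case the following range must start exactly at special_class[i + 1].

    #include <cassert>

    int main() {
      const int special[] = { '0', '9' + 1, 0x10000 };
      // CompareRanges: \d is the single inclusive range ['0', '9'].
      const int d_from = '0', d_to = '9';
      assert(d_from == special[0] && d_to == special[1] - 1);
      // CompareInverseRanges: \D is [0, '0'-1] followed by ['9'+1, 0xFFFF].
      const int inv0_to = '0' - 1, inv1_from = '9' + 1;
      assert(inv0_to == special[0] - 1);  // first range ends just before 'from'
      assert(inv1_from == special[1]);    // next range starts at exclusive 'to'
    }
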
@@ -4779,27 +4961,31 @@ RegExpNode* RegExpAlternative::ToNode(RegExpCompiler* compiler,
 }
 
 
-static void AddClass(const uc16* elmv,
+static void AddClass(const int* elmv,
                      int elmc,
                      ZoneList<CharacterRange>* ranges) {
+  elmc--;
+  ASSERT(elmv[elmc] == 0x10000);
   for (int i = 0; i < elmc; i += 2) {
-    ASSERT(elmv[i] <= elmv[i + 1]);
-    ranges->Add(CharacterRange(elmv[i], elmv[i + 1]));
+    ASSERT(elmv[i] < elmv[i + 1]);
+    ranges->Add(CharacterRange(elmv[i], elmv[i + 1] - 1));
   }
 }
 
 
-static void AddClassNegated(const uc16 *elmv,
+static void AddClassNegated(const int *elmv,
                             int elmc,
                             ZoneList<CharacterRange>* ranges) {
+  elmc--;
+  ASSERT(elmv[elmc] == 0x10000);
   ASSERT(elmv[0] != 0x0000);
   ASSERT(elmv[elmc-1] != String::kMaxUtf16CodeUnit);
   uc16 last = 0x0000;
   for (int i = 0; i < elmc; i += 2) {
     ASSERT(last <= elmv[i] - 1);
-    ASSERT(elmv[i] <= elmv[i + 1]);
+    ASSERT(elmv[i] < elmv[i + 1]);
     ranges->Add(CharacterRange(last, elmv[i] - 1));
-    last = elmv[i + 1] + 1;
+    last = elmv[i + 1];
   }
   ranges->Add(CharacterRange(last, String::kMaxUtf16CodeUnit));
 }
@@ -4850,8 +5036,8 @@ void CharacterRange::AddClassEscape(uc16 type,
 }
 
 
-Vector<const uc16> CharacterRange::GetWordBounds() {
-  return Vector<const uc16>(kWordRanges, kWordRangeCount);
+Vector<const int> CharacterRange::GetWordBounds() {
+  return Vector<const int>(kWordRanges, kWordRangeCount - 1);
 }
 
 
@@ -4883,7 +5069,7 @@ void CharacterRangeSplitter::Call(uc16 from, DispatchTable::Entry entry) {
 
 
 void CharacterRange::Split(ZoneList<CharacterRange>* base,
-                           Vector<const uc16> overlay,
+                           Vector<const int> overlay,
                            ZoneList<CharacterRange>** included,
                            ZoneList<CharacterRange>** excluded) {
   ASSERT_EQ(NULL, *included);
@@ -4892,7 +5078,7 @@ void CharacterRange::Split(ZoneList<CharacterRange>* base,
   for (int i = 0; i < base->length(); i++)
     table.AddRange(base->at(i), CharacterRangeSplitter::kInBase);
   for (int i = 0; i < overlay.length(); i += 2) {
-    table.AddRange(CharacterRange(overlay[i], overlay[i+1]),
+    table.AddRange(CharacterRange(overlay[i], overlay[i + 1] - 1),
                    CharacterRangeSplitter::kInOverlay);
   }
   CharacterRangeSplitter callback(included, excluded);
@@ -4978,87 +5164,6 @@ bool CharacterRange::IsCanonical(ZoneList<CharacterRange>* ranges) {
   return true;
 }
 
-SetRelation CharacterRange::WordCharacterRelation(
-    ZoneList<CharacterRange>* range) {
-  ASSERT(IsCanonical(range));
-  int i = 0;  // Word character range index.
-  int j = 0;  // Argument range index.
-  ASSERT_NE(0, kWordRangeCount);
-  SetRelation result;
-  if (range->length() == 0) {
-    result.SetElementsInSecondSet();
-    return result;
-  }
-  CharacterRange argument_range = range->at(0);
-  CharacterRange word_range = CharacterRange(kWordRanges[0], kWordRanges[1]);
-  while (i < kWordRangeCount && j < range->length()) {
-    // Check the two ranges for the five cases:
-    // - no overlap.
-    // - partial overlap (there are elements in both ranges that isn't
-    //   in the other, and there are also elements that are in both).
-    // - argument range entirely inside word range.
-    // - word range entirely inside argument range.
-    // - ranges are completely equal.
-
-    // First check for no overlap. The earlier range is not in the other set.
-    if (argument_range.from() > word_range.to()) {
-      // Ranges are disjoint. The earlier word range contains elements that
-      // cannot be in the argument set.
-      result.SetElementsInSecondSet();
-    } else if (word_range.from() > argument_range.to()) {
-      // Ranges are disjoint. The earlier argument range contains elements that
-      // cannot be in the word set.
-      result.SetElementsInFirstSet();
-    } else if (word_range.from() <= argument_range.from() &&
-               word_range.to() >= argument_range.from()) {
-      result.SetElementsInBothSets();
-      // argument range completely inside word range.
-      if (word_range.from() < argument_range.from() ||
-          word_range.to() > argument_range.from()) {
-        result.SetElementsInSecondSet();
-      }
-    } else if (word_range.from() >= argument_range.from() &&
-               word_range.to() <= argument_range.from()) {
-      result.SetElementsInBothSets();
-      result.SetElementsInFirstSet();
-    } else {
-      // There is overlap, and neither is a subrange of the other
-      result.SetElementsInFirstSet();
-      result.SetElementsInSecondSet();
-      result.SetElementsInBothSets();
-    }
-    if (result.NonTrivialIntersection()) {
-      // The result is as (im)precise as we can possibly make it.
-      return result;
-    }
-    // Progress the range(s) with minimal to-character.
-    uc16 word_to = word_range.to();
-    uc16 argument_to = argument_range.to();
-    if (argument_to <= word_to) {
-      j++;
-      if (j < range->length()) {
-        argument_range = range->at(j);
-      }
-    }
-    if (word_to <= argument_to) {
-      i += 2;
-      if (i < kWordRangeCount) {
-        word_range = CharacterRange(kWordRanges[i], kWordRanges[i + 1]);
-      }
-    }
-  }
-  // Check if anything wasn't compared in the loop.
-  if (i < kWordRangeCount) {
-    // word range contains something not in argument range.
-    result.SetElementsInSecondSet();
-  } else if (j < range->length()) {
-    // Argument range contains something not in word range.
-    result.SetElementsInFirstSet();
-  }
-
-  return result;
-}
-
 
 ZoneList<CharacterRange>* CharacterSet::ranges() {
   if (ranges_ == NULL) {
@@ -5191,145 +5296,6 @@ void CharacterRange::Canonicalize(ZoneList<CharacterRange>* character_ranges) {
 }
 
 
-// Utility function for CharacterRange::Merge. Adds a range at the end of
-// a canonicalized range list, if necessary merging the range with the last
-// range of the list.
-static void AddRangeToSet(ZoneList<CharacterRange>* set, CharacterRange range) {
-  if (set == NULL) return;
-  ASSERT(set->length() == 0 || set->at(set->length() - 1).to() < range.from());
-  int n = set->length();
-  if (n > 0) {
-    CharacterRange lastRange = set->at(n - 1);
-    if (lastRange.to() == range.from() - 1) {
-      set->at(n - 1) = CharacterRange(lastRange.from(), range.to());
-      return;
-    }
-  }
-  set->Add(range);
-}
-
-
-static void AddRangeToSelectedSet(int selector,
-                                  ZoneList<CharacterRange>* first_set,
-                                  ZoneList<CharacterRange>* second_set,
-                                  ZoneList<CharacterRange>* intersection_set,
-                                  CharacterRange range) {
-  switch (selector) {
-    case kInsideFirst:
-      AddRangeToSet(first_set, range);
-      break;
-    case kInsideSecond:
-      AddRangeToSet(second_set, range);
-      break;
-    case kInsideBoth:
-      AddRangeToSet(intersection_set, range);
-      break;
-  }
-}
-
-
-
-void CharacterRange::Merge(ZoneList<CharacterRange>* first_set,
-                           ZoneList<CharacterRange>* second_set,
-                           ZoneList<CharacterRange>* first_set_only_out,
-                           ZoneList<CharacterRange>* second_set_only_out,
-                           ZoneList<CharacterRange>* both_sets_out) {
-  // Inputs are canonicalized.
-  ASSERT(CharacterRange::IsCanonical(first_set));
-  ASSERT(CharacterRange::IsCanonical(second_set));
-  // Outputs are empty, if applicable.
-  ASSERT(first_set_only_out == NULL || first_set_only_out->length() == 0);
-  ASSERT(second_set_only_out == NULL || second_set_only_out->length() == 0);
-  ASSERT(both_sets_out == NULL || both_sets_out->length() == 0);
-
-  // Merge sets by iterating through the lists in order of lowest "from" value,
-  // and putting intervals into one of three sets.
-
-  if (first_set->length() == 0) {
-    second_set_only_out->AddAll(*second_set);
-    return;
-  }
-  if (second_set->length() == 0) {
-    first_set_only_out->AddAll(*first_set);
-    return;
-  }
-  // Indices into input lists.
-  int i1 = 0;
-  int i2 = 0;
-  // Cache length of input lists.
-  int n1 = first_set->length();
-  int n2 = second_set->length();
-  // Current range. May be invalid if state is kInsideNone.
-  int from = 0;
-  int to = -1;
-  // Where current range comes from.
-  int state = kInsideNone;
-
-  while (i1 < n1 || i2 < n2) {
-    CharacterRange next_range;
-    int range_source;
-    if (i2 == n2 ||
-        (i1 < n1 && first_set->at(i1).from() < second_set->at(i2).from())) {
-      // Next smallest element is in first set.
-      next_range = first_set->at(i1++);
-      range_source = kInsideFirst;
-    } else {
-      // Next smallest element is in second set.
-      next_range = second_set->at(i2++);
-      range_source = kInsideSecond;
-    }
-    if (to < next_range.from()) {
-      // Ranges disjoint: |current|  |next|
-      AddRangeToSelectedSet(state,
-                            first_set_only_out,
-                            second_set_only_out,
-                            both_sets_out,
-                            CharacterRange(from, to));
-      from = next_range.from();
-      to = next_range.to();
-      state = range_source;
-    } else {
-      if (from < next_range.from()) {
-        AddRangeToSelectedSet(state,
-                              first_set_only_out,
-                              second_set_only_out,
-                              both_sets_out,
-                              CharacterRange(from, next_range.from()-1));
-      }
-      if (to < next_range.to()) {
-        // Ranges overlap:  |current|
-        //                       |next|
-        AddRangeToSelectedSet(state | range_source,
-                              first_set_only_out,
-                              second_set_only_out,
-                              both_sets_out,
-                              CharacterRange(next_range.from(), to));
-        from = to + 1;
-        to = next_range.to();
-        state = range_source;
-      } else {
-        // Range included:    |current| , possibly ending at same character.
-        //                      |next|
-        AddRangeToSelectedSet(
-            state | range_source,
-            first_set_only_out,
-            second_set_only_out,
-            both_sets_out,
-            CharacterRange(next_range.from(), next_range.to()));
-        from = next_range.to() + 1;
-        // If ranges end at same character, both ranges are consumed completely.
-        if (next_range.to() == to) state = kInsideNone;
-      }
-    }
-  }
-  AddRangeToSelectedSet(state,
-                        first_set_only_out,
-                        second_set_only_out,
-                        both_sets_out,
-                        CharacterRange(from, to));
-}
-
-
 void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
                             ZoneList<CharacterRange>* negated_ranges) {
   ASSERT(CharacterRange::IsCanonical(ranges));
@@ -5353,45 +5319,6 @@ void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
 }
 
 
-
-// -------------------------------------------------------------------
-// Interest propagation
-
-
-RegExpNode* RegExpNode::TryGetSibling(NodeInfo* info) {
-  for (int i = 0; i < siblings_.length(); i++) {
-    RegExpNode* sibling = siblings_.Get(i);
-    if (sibling->info()->Matches(info))
-      return sibling;
-  }
-  return NULL;
-}
-
-
-RegExpNode* RegExpNode::EnsureSibling(NodeInfo* info, bool* cloned) {
-  ASSERT_EQ(false, *cloned);
-  siblings_.Ensure(this);
-  RegExpNode* result = TryGetSibling(info);
-  if (result != NULL) return result;
-  result = this->Clone();
-  NodeInfo* new_info = result->info();
-  new_info->ResetCompilationState();
-  new_info->AddFromPreceding(info);
-  AddSibling(result);
-  *cloned = true;
-  return result;
-}
-
-
-template <class C>
-static RegExpNode* PropagateToEndpoint(C* node, NodeInfo* info) {
-  NodeInfo full_info(*node->info());
-  full_info.AddFromPreceding(info);
-  bool cloned = false;
-  return RegExpNode::EnsureSibling(node, &full_info, &cloned);
-}
-
-
 // -------------------------------------------------------------------
 // Splay tree
 
@@ -5642,169 +5569,20 @@ void Analysis::VisitBackReference(BackReferenceNode* that) {
 
 void Analysis::VisitAssertion(AssertionNode* that) {
   EnsureAnalyzed(that->on_success());
-  AssertionNode::AssertionNodeType type = that->type();
-  if (type == AssertionNode::AT_BOUNDARY ||
-      type == AssertionNode::AT_NON_BOUNDARY) {
-    // Check if the following character is known to be a word character
-    // or known to not be a word character.
-    ZoneList<CharacterRange>* following_chars = that->FirstCharacterSet();
-
-    CharacterRange::Canonicalize(following_chars);
-
-    SetRelation word_relation =
-        CharacterRange::WordCharacterRelation(following_chars);
-    if (word_relation.Disjoint()) {
-      // Includes the case where following_chars is empty (e.g., end-of-input).
-      // Following character is definitely *not* a word character.
-      type = (type == AssertionNode::AT_BOUNDARY) ?
-                 AssertionNode::AFTER_WORD_CHARACTER :
-                 AssertionNode::AFTER_NONWORD_CHARACTER;
-      that->set_type(type);
-    } else if (word_relation.ContainedIn()) {
-      // Following character is definitely a word character.
-      type = (type == AssertionNode::AT_BOUNDARY) ?
-                 AssertionNode::AFTER_NONWORD_CHARACTER :
-                 AssertionNode::AFTER_WORD_CHARACTER;
-      that->set_type(type);
-    }
-  }
 }
 
 
-ZoneList<CharacterRange>* RegExpNode::FirstCharacterSet() {
-  if (first_character_set_ == NULL) {
-    if (ComputeFirstCharacterSet(kFirstCharBudget) < 0) {
-      // If we can't find an exact solution within the budget, we
-      // set the value to the set of every character, i.e., all characters
-      // are possible.
-      ZoneList<CharacterRange>* all_set = new ZoneList<CharacterRange>(1);
-      all_set->Add(CharacterRange::Everything());
-      first_character_set_ = all_set;
-    }
-  }
-  return first_character_set_;
-}
-
-
-int RegExpNode::ComputeFirstCharacterSet(int budget) {
-  // Default behavior is to not be able to determine the first character.
-  return kComputeFirstCharacterSetFail;
-}
-
-
-int LoopChoiceNode::ComputeFirstCharacterSet(int budget) {
-  budget--;
-  if (budget >= 0) {
-    // Find loop min-iteration. It's the value of the guarded choice node
-    // with a GEQ guard, if any.
-    int min_repetition = 0;
-
-    for (int i = 0; i <= 1; i++) {
-      GuardedAlternative alternative = alternatives()->at(i);
-      ZoneList<Guard*>* guards = alternative.guards();
-      if (guards != NULL && guards->length() > 0) {
-        Guard* guard = guards->at(0);
-        if (guard->op() == Guard::GEQ) {
-          min_repetition = guard->value();
-          break;
-        }
-      }
-    }
-
-    budget = loop_node()->ComputeFirstCharacterSet(budget);
-    if (budget >= 0) {
-      ZoneList<CharacterRange>* character_set =
-          loop_node()->first_character_set();
-      if (body_can_be_zero_length() || min_repetition == 0) {
-        budget = continue_node()->ComputeFirstCharacterSet(budget);
-        if (budget < 0) return budget;
-        ZoneList<CharacterRange>* body_set =
-            continue_node()->first_character_set();
-        ZoneList<CharacterRange>* union_set =
-          new ZoneList<CharacterRange>(Max(character_set->length(),
-                                           body_set->length()));
-        CharacterRange::Merge(character_set,
-                              body_set,
-                              union_set,
-                              union_set,
-                              union_set);
-        character_set = union_set;
-      }
-      set_first_character_set(character_set);
-    }
-  }
-  return budget;
-}
-
-
-int NegativeLookaheadChoiceNode::ComputeFirstCharacterSet(int budget) {
-  budget--;
-  if (budget >= 0) {
-    GuardedAlternative successor = this->alternatives()->at(1);
-    RegExpNode* successor_node = successor.node();
-    budget = successor_node->ComputeFirstCharacterSet(budget);
-    if (budget >= 0) {
-      set_first_character_set(successor_node->first_character_set());
-    }
-  }
-  return budget;
-}
-
-
-// The first character set of an EndNode is unknowable. Just use the
-// default implementation that fails and returns all characters as possible.
-
-
-int AssertionNode::ComputeFirstCharacterSet(int budget) {
-  budget -= 1;
-  if (budget >= 0) {
-    switch (type_) {
-      case AT_END: {
-        set_first_character_set(new ZoneList<CharacterRange>(0));
-        break;
-      }
-      case AT_START:
-      case AT_BOUNDARY:
-      case AT_NON_BOUNDARY:
-      case AFTER_NEWLINE:
-      case AFTER_NONWORD_CHARACTER:
-      case AFTER_WORD_CHARACTER: {
-        ASSERT_NOT_NULL(on_success());
-        budget = on_success()->ComputeFirstCharacterSet(budget);
-        if (budget >= 0) {
-          set_first_character_set(on_success()->first_character_set());
-        }
-        break;
-      }
-    }
-  }
-  return budget;
-}
-
-
-int ActionNode::ComputeFirstCharacterSet(int budget) {
-  if (type_ == POSITIVE_SUBMATCH_SUCCESS) return kComputeFirstCharacterSetFail;
-  budget--;
-  if (budget >= 0) {
-    ASSERT_NOT_NULL(on_success());
-    budget = on_success()->ComputeFirstCharacterSet(budget);
-    if (budget >= 0) {
-      set_first_character_set(on_success()->first_character_set());
-    }
-  }
-  return budget;
+void BackReferenceNode::FillInBMInfo(
+    int offset, BoyerMooreLookahead* bm, bool not_at_start) {
+  // Working out the set of characters that a backreference can match is too
+  // hard, so we just say that any character can match.
+  bm->SetRest(offset);
+  SaveBMInfo(bm, not_at_start, offset);
 }
 
 
-int BackReferenceNode::ComputeFirstCharacterSet(int budget) {
-  // We don't know anything about the first character of a backreference
-  // at this point.
-  // The potential first characters are the first characters of the capture,
-  // and the first characters of the on_success node, depending on whether the
-  // capture can be empty and whether it is known to be participating or known
-  // not to be.
-  return kComputeFirstCharacterSetFail;
-}
+STATIC_ASSERT(BoyerMoorePositionInfo::kMapSize ==
+              RegExpMacroAssembler::kTableSize);
 
 
 void ChoiceNode::FillInBMInfo(
@@ -5814,24 +5592,33 @@ void ChoiceNode::FillInBMInfo(
     GuardedAlternative& alt = alts->at(i);
     if (alt.guards() != NULL && alt.guards()->length() != 0) {
       bm->SetRest(offset);  // Give up trying to fill in info.
+      SaveBMInfo(bm, not_at_start, offset);
       return;
     }
     alt.node()->FillInBMInfo(offset, bm, not_at_start);
   }
+  SaveBMInfo(bm, not_at_start, offset);
 }
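The SaveBMInfo calls added throughout this hunk all reduce to one rule: a node caches lookahead info only when the fill started at that node (offset == 0); info computed part-way through a scan describes an ancestor's window and is not saved. The rule in isolation:

    #include <cassert>

    struct Node {
      const void* bm_info[2] = {nullptr, nullptr};  // indexed by not_at_start
      void SaveBMInfo(const void* bm, bool not_at_start, int offset) {
        if (offset == 0) bm_info[not_at_start ? 1 : 0] = bm;
      }
    };

    int main() {
      Node n;
      int bm;
      n.SaveBMInfo(&bm, true, 3);   // mid-scan: not cached on this node
      assert(n.bm_info[1] == nullptr);
      n.SaveBMInfo(&bm, true, 0);   // scan started here: cached
      assert(n.bm_info[1] == &bm);
    }
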
 
 
 void TextNode::FillInBMInfo(
-    int offset, BoyerMooreLookahead* bm, bool not_at_start) {
-  if (offset >= bm->length()) return;
+    int initial_offset, BoyerMooreLookahead* bm, bool not_at_start) {
+  if (initial_offset >= bm->length()) return;
+  int offset = initial_offset;
   int max_char = bm->max_char();
   for (int i = 0; i < elements()->length(); i++) {
-    if (offset >= bm->length()) return;
+    if (offset >= bm->length()) {
+      if (initial_offset == 0) set_bm_info(not_at_start, bm);
+      return;
+    }
     TextElement text = elements()->at(i);
     if (text.type == TextElement::ATOM) {
       RegExpAtom* atom = text.data.u_atom;
       for (int j = 0; j < atom->length(); j++, offset++) {
-        if (offset >= bm->length()) return;
+        if (offset >= bm->length()) {
+          if (initial_offset == 0) set_bm_info(not_at_start, bm);
+          return;
+        }
         uc16 character = atom->data()[j];
         if (bm->compiler()->ignore_case()) {
           unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
@@ -5858,67 +5645,23 @@ void TextNode::FillInBMInfo(
           CharacterRange& range = ranges->at(k);
           if (range.from() > max_char) continue;
           int to = Min(max_char, static_cast<int>(range.to()));
-          if (to - range.from() >= BoyerMooreLookahead::kTooManyCharacters) {
-            bm->SetAll(offset);
-            break;
-          }
-          for (int m = range.from(); m <= to; m++) {
-            bm->Set(offset, m);
-          }
+          bm->SetInterval(offset, Interval(range.from(), to));
         }
       }
       offset++;
     }
   }
-  if (offset >= bm->length()) return;
+  if (offset >= bm->length()) {
+    if (initial_offset == 0) set_bm_info(not_at_start, bm);
+    return;
+  }
   on_success()->FillInBMInfo(offset,
                              bm,
                              true);  // Not at start after a text node.
+  if (initial_offset == 0) set_bm_info(not_at_start, bm);
 }
 
 
-int TextNode::ComputeFirstCharacterSet(int budget) {
-  budget--;
-  if (budget >= 0) {
-    ASSERT_NE(0, elements()->length());
-    TextElement text = elements()->at(0);
-    if (text.type == TextElement::ATOM) {
-      RegExpAtom* atom = text.data.u_atom;
-      ASSERT_NE(0, atom->length());
-      uc16 first_char = atom->data()[0];
-      ZoneList<CharacterRange>* range = new ZoneList<CharacterRange>(1);
-      range->Add(CharacterRange(first_char, first_char));
-      set_first_character_set(range);
-    } else {
-      ASSERT(text.type == TextElement::CHAR_CLASS);
-      RegExpCharacterClass* char_class = text.data.u_char_class;
-      ZoneList<CharacterRange>* ranges = char_class->ranges();
-      // TODO(lrn): Canonicalize ranges when they are created
-      // instead of waiting until now.
-      CharacterRange::Canonicalize(ranges);
-      if (char_class->is_negated()) {
-        int length = ranges->length();
-        int new_length = length + 1;
-        if (length > 0) {
-          if (ranges->at(0).from() == 0) new_length--;
-          if (ranges->at(length - 1).to() == String::kMaxUtf16CodeUnit) {
-            new_length--;
-          }
-        }
-        ZoneList<CharacterRange>* negated_ranges =
-            new ZoneList<CharacterRange>(new_length);
-        CharacterRange::Negate(ranges, negated_ranges);
-        set_first_character_set(negated_ranges);
-      } else {
-        set_first_character_set(ranges);
-      }
-    }
-  }
-  return budget;
-}
-
-
-
 // -------------------------------------------------------------------
 // Dispatch table construction
 
@@ -6091,6 +5834,14 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
       node = loop_node;
     }
   }
+  if (is_ascii) {
+    node = node->FilterASCII(RegExpCompiler::kMaxRecursion);
+    // Do it again to propagate the new nodes to places where they were not
+    // put because they had not been calculated yet.
+    if (node != NULL) node = node->FilterASCII(RegExpCompiler::kMaxRecursion);
+  }
+
+  if (node == NULL) node = new EndNode(EndNode::BACKTRACK);
   data->node = node;
   Analysis analysis(ignore_case, is_ascii);
   analysis.EnsureAnalyzed(node);
index 288e995..20313ca 100644 (file)
@@ -40,6 +40,7 @@ class RegExpCompiler;
 class RegExpMacroAssembler;
 class RegExpNode;
 class RegExpTree;
+class BoyerMooreLookahead;
 
 class RegExpImpl {
  public:
@@ -224,48 +225,8 @@ enum ElementInSetsRelation {
 };
 
 
-// Represents the relation of two sets.
-// Sets can be either disjoint, partially or fully overlapping, or equal.
-class SetRelation BASE_EMBEDDED {
- public:
-  // Relation is represented by a bit saying whether there are elements in
-  // one set that is not in the other, and a bit saying that there are elements
-  // that are in both sets.
-
-  // Location of an element. Corresponds to the internal areas of
-  // a Venn diagram.
-  enum {
-    kInFirst = 1 << kInsideFirst,
-    kInSecond = 1 << kInsideSecond,
-    kInBoth = 1 << kInsideBoth
-  };
-  SetRelation() : bits_(0) {}
-  ~SetRelation() {}
-  // Add the existence of objects in a particular
-  void SetElementsInFirstSet() { bits_ |= kInFirst; }
-  void SetElementsInSecondSet() { bits_ |= kInSecond; }
-  void SetElementsInBothSets() { bits_ |= kInBoth; }
-  // Check the currently known relation of the sets (common functions only,
-  // for other combinations, use value() to get the bits and check them
-  // manually).
-  // Sets are completely disjoint.
-  bool Disjoint() { return (bits_ & kInBoth) == 0; }
-  // Sets are equal.
-  bool Equals() { return (bits_ & (kInFirst | kInSecond)) == 0; }
-  // First set contains second.
-  bool Contains() { return (bits_ & kInSecond) == 0; }
-  // Second set contains first.
-  bool ContainedIn() { return (bits_ & kInFirst) == 0; }
-  bool NonTrivialIntersection() {
-    return (bits_ == (kInFirst | kInSecond | kInBoth));
-  }
-  int value() { return bits_; }
-
- private:
-  int bits_;
-};
-
-
+// Represents code units in the range from from_ to to_, both ends
+// inclusive.
 class CharacterRange {
  public:
   CharacterRange() : from_(0), to_(0) { }
@@ -273,7 +234,7 @@ class CharacterRange {
   CharacterRange(void* null) { ASSERT_EQ(NULL, null); }  //NOLINT
   CharacterRange(uc16 from, uc16 to) : from_(from), to_(to) { }
   static void AddClassEscape(uc16 type, ZoneList<CharacterRange>* ranges);
-  static Vector<const uc16> GetWordBounds();
+  static Vector<const int> GetWordBounds();
   static inline CharacterRange Singleton(uc16 value) {
     return CharacterRange(value, value);
   }
@@ -294,7 +255,7 @@ class CharacterRange {
   bool IsSingleton() { return (from_ == to_); }
   void AddCaseEquivalents(ZoneList<CharacterRange>* ranges, bool is_ascii);
   static void Split(ZoneList<CharacterRange>* base,
-                    Vector<const uc16> overlay,
+                    Vector<const int> overlay,
                     ZoneList<CharacterRange>** included,
                     ZoneList<CharacterRange>** excluded);
   // Whether a range list is in canonical form: Ranges ordered by from value,
@@ -305,28 +266,6 @@ class CharacterRange {
   // adjacent ranges are merged. The resulting list may be shorter than the
   // original, but cannot be longer.
   static void Canonicalize(ZoneList<CharacterRange>* ranges);
-  // Check how the set of characters defined by a CharacterRange list relates
-  // to the set of word characters. List must be in canonical form.
-  static SetRelation WordCharacterRelation(ZoneList<CharacterRange>* ranges);
-  // Takes two character range lists (representing character sets) in canonical
-  // form and merges them.
-  // The characters that are only covered by the first set are added to
-  // first_set_only_out. the characters that are only in the second set are
-  // added to second_set_only_out, and the characters that are in both are
-  // added to both_sets_out.
-  // The pointers to first_set_only_out, second_set_only_out and both_sets_out
-  // should be to empty lists, but they need not be distinct, and may be NULL.
-  // If NULL, the characters are dropped, and if two arguments are the same
-  // pointer, the result is the union of the two sets that would be created
-  // if the pointers had been distinct.
-  // This way, the Merge function can compute all the usual set operations:
-  // union (all three out-sets are equal), intersection (only both_sets_out is
-  // non-NULL), and set difference (only first_set is non-NULL).
-  static void Merge(ZoneList<CharacterRange>* first_set,
-                    ZoneList<CharacterRange>* second_set,
-                    ZoneList<CharacterRange>* first_set_only_out,
-                    ZoneList<CharacterRange>* second_set_only_out,
-                    ZoneList<CharacterRange>* both_sets_out);
   // Negate the contents of a character range in canonical form.
   static void Negate(ZoneList<CharacterRange>* src,
                      ZoneList<CharacterRange>* dst);
@@ -421,90 +360,6 @@ class DispatchTable : public ZoneObject {
 };
 
 
-// Improve the speed that we scan for an initial point where a non-anchored
-// regexp can match by using a Boyer-Moore-like table. This is done by
-// identifying non-greedy non-capturing loops in the nodes that eat any
-// character one at a time.  For example in the middle of the regexp
-// /foo[\s\S]*?bar/ we find such a loop.  There is also such a loop implicitly
-// inserted at the start of any non-anchored regexp.
-//
-// When we have found such a loop we look ahead in the nodes to find the set of
-// characters that can come at given distances. For example for the regexp
-// /.?foo/ we know that there are at least 3 characters ahead of us, and the
-// sets of characters that can occur are [any, [f, o], [o]]. We find a range in
-// the lookahead info where the set of characters is reasonably constrained. In
-// our example this is from index 1 to 2 (0 is not constrained). We can now
-// look 3 characters ahead and if we don't find one of [f, o] (the union of
-// [f, o] and [o]) then we can skip forwards by the range size (in this case 2).
-//
-// For Unicode input strings we do the same, but modulo 128.
-//
-// We also look at the first string fed to the regexp and use that to get a hint
-// of the character frequencies in the inputs. This affects the assessment of
-// whether the set of characters is 'reasonably constrained'.
-//
-// We also have another lookahead mechanism (called quick check in the code),
-// which uses a wide load of multiple characters followed by a mask and compare
-// to determine whether a match is possible at this point.
-class BoyerMooreLookahead {
- public:
-  BoyerMooreLookahead(int length, int map_length, RegExpCompiler* compiler);
-
-  int length() { return length_; }
-  int max_char() { return max_char_; }
-  RegExpCompiler* compiler() { return compiler_; }
-
-  static const int kTooManyCharacters = 32;
-
-  int Count(int map_number) {
-    ZoneList<bool>* map = bitmaps_->at(map_number);
-    if (map == NULL) return map_length_;
-    int count = 0;
-    for (int i = 0; i < map_length_; i++) {
-      if (map->at(i)) count++;
-    }
-    return count;
-  }
-
-  void Set(int map_number, int character) {
-    if (character > max_char_) return;
-    ZoneList<bool>* map = bitmaps_->at(map_number);
-    if (map == NULL) return;
-    map->at(character & (map_length_ - 1)) = true;
-  }
-
-  void SetAll(int map_number) {
-    bitmaps_->at(map_number) = NULL;
-  }
-
-  void SetRest(int from_map) {
-    for (int i = from_map; i < length_; i++) SetAll(i);
-  }
-  bool EmitSkipInstructions(RegExpMacroAssembler* masm);
-
- private:
-  // This is the value obtained by EatsAtLeast.  If we do not have at least this
-  // many characters left in the sample string then the match is bound to fail.
-  // Therefore it is OK to read a character this far ahead of the current match
-  // point.
-  int length_;
-  // We conservatively consider all character values modulo this length.  For
-  // ASCII there is no loss of precision, since this has a value of 128.
-  int map_length_;
-  RegExpCompiler* compiler_;
-  // 0x7f for ASCII, 0xffff for UTF-16.
-  int max_char_;
-  ZoneList<ZoneList<bool>*>* bitmaps_;
-
-  int GetSkipTable(int min_lookahead,
-                   int max_lookahead,
-                   Handle<ByteArray> boolean_skip_table);
-  bool FindWorthwhileInterval(int* from, int* to);
-  int FindBestInterval(
-    int max_number_of_chars, int old_biggest_points, int* from, int* to);
-};
-
-
 #define FOR_EACH_NODE_TYPE(VISIT)                                    \
   VISIT(End)                                                         \
   VISIT(Action)                                                      \
@@ -561,7 +416,8 @@ struct NodeInfo {
         follows_newline_interest(false),
         follows_start_interest(false),
         at_end(false),
-        visited(false) { }
+        visited(false),
+        replacement_calculated(false) { }
 
   // Returns true if the interests and assumptions of this node
   // matches the given one.
@@ -611,25 +467,7 @@ struct NodeInfo {
 
   bool at_end: 1;
   bool visited: 1;
-};
-
-
-class SiblingList {
- public:
-  SiblingList() : list_(NULL) { }
-  int length() {
-    return list_ == NULL ? 0 : list_->length();
-  }
-  void Ensure(RegExpNode* parent) {
-    if (list_ == NULL) {
-      list_ = new ZoneList<RegExpNode*>(2);
-      list_->Add(parent);
-    }
-  }
-  void Add(RegExpNode* node) { list_->Add(node); }
-  RegExpNode* Get(int index) { return list_->at(index); }
- private:
-  ZoneList<RegExpNode*>* list_;
+  bool replacement_calculated: 1;
 };
 
 
@@ -685,9 +523,14 @@ class QuickCheckDetails {
 };
 
 
+extern int kUninitializedRegExpNodePlaceHolder;
+
+
 class RegExpNode: public ZoneObject {
  public:
-  RegExpNode() : first_character_set_(NULL), trace_count_(0) { }
+  RegExpNode() : replacement_(NULL), trace_count_(0) {
+    bm_info_[0] = bm_info_[1] = NULL;
+  }
   virtual ~RegExpNode();
   virtual void Accept(NodeVisitor* visitor) = 0;
   // Generates a goto to this node or actually generates the code at this point.
@@ -731,12 +574,35 @@ class RegExpNode: public ZoneObject {
   // Collects information on the possible code units (mod 128) that can match if
   // we look forward.  This is used for a Boyer-Moore-like string searching
   // implementation.  TODO(erikcorry):  This should share more code with
-  // EatsAtLeast, GetQuickCheckDetails and ComputeFirstCharacterSet.
+  // EatsAtLeast and GetQuickCheckDetails.
   virtual void FillInBMInfo(
       int offset, BoyerMooreLookahead* bm, bool not_at_start) {
     UNREACHABLE();
   }
 
+  // If we know that the input is ASCII then there are some nodes that can
+  // never match.  This method returns a node to substitute for this node,
+  // or NULL if this node can never match.
+  virtual RegExpNode* FilterASCII(int depth) { return this; }
+  // Helper for FilterASCII.
+  RegExpNode* replacement() {
+    ASSERT(info()->replacement_calculated);
+    return replacement_;
+  }
+  RegExpNode* set_replacement(RegExpNode* replacement) {
+    info()->replacement_calculated = true;
+    replacement_ = replacement;
+    return replacement;  // For convenience.
+  }
+
+  // We want to avoid recalculating the lookahead info, so we store it on the
+  // node.  Only info that is for this node is stored.  The info is for this
+  // node exactly when offset == 0, because then it was calculated relative
+  // to this node.
+  void SaveBMInfo(BoyerMooreLookahead* bm, bool not_at_start, int offset) {
+    if (offset == 0) set_bm_info(not_at_start, bm);
+  }
+
   Label* label() { return &label_; }
   // If non-generic code is generated for a node (i.e. the node is not at the
   // start of the trace) then it cannot be reused.  This variable sets a limit
@@ -747,72 +613,31 @@ class RegExpNode: public ZoneObject {
 
   NodeInfo* info() { return &info_; }
 
-  void AddSibling(RegExpNode* node) { siblings_.Add(node); }
-
-  // Static version of EnsureSibling that expresses the fact that the
-  // result has the same type as the input.
-  template <class C>
-  static C* EnsureSibling(C* node, NodeInfo* info, bool* cloned) {
-    return static_cast<C*>(node->EnsureSibling(info, cloned));
-  }
-
-  SiblingList* siblings() { return &siblings_; }
-  void set_siblings(SiblingList* other) { siblings_ = *other; }
-
-  // Return the set of possible next characters recognized by the regexp
-  // (or a safe subset, potentially the set of all characters).
-  ZoneList<CharacterRange>* FirstCharacterSet();
-
-  // Compute (if possible within the budget of traversed nodes) the
-  // possible first characters of the input matched by this node and
-  // its continuation. Returns the remaining budget after the computation.
-  // If the budget is spent, the result is negative, and the cached
-  // first_character_set_ value isn't set.
-  virtual int ComputeFirstCharacterSet(int budget);
-
-  // Get and set the cached first character set value.
-  ZoneList<CharacterRange>* first_character_set() {
-    return first_character_set_;
-  }
-  void set_first_character_set(ZoneList<CharacterRange>* character_set) {
-    first_character_set_ = character_set;
+  BoyerMooreLookahead* bm_info(bool not_at_start) {
+    return bm_info_[not_at_start ? 1 : 0];
   }
 
  protected:
   enum LimitResult { DONE, CONTINUE };
-  static const int kComputeFirstCharacterSetFail = -1;
+  RegExpNode* replacement_;
 
   LimitResult LimitVersions(RegExpCompiler* compiler, Trace* trace);
 
-  // Returns a sibling of this node whose interests and assumptions
-  // match the ones in the given node info.  If no sibling exists NULL
-  // is returned.
-  RegExpNode* TryGetSibling(NodeInfo* info);
-
-  // Returns a sibling of this node whose interests match the ones in
-  // the given node info.  The info must not contain any assertions.
-  // If no node exists a new one will be created by cloning the current
-  // node.  The result will always be an instance of the same concrete
-  // class as this node.
-  RegExpNode* EnsureSibling(NodeInfo* info, bool* cloned);
-
-  // Returns a clone of this node initialized using the copy constructor
-  // of its concrete class.  Note that the node may have to be pre-
-  // processed before it is on a usable state.
-  virtual RegExpNode* Clone() = 0;
+  void set_bm_info(bool not_at_start, BoyerMooreLookahead* bm) {
+    bm_info_[not_at_start ? 1 : 0] = bm;
+  }
 
  private:
   static const int kFirstCharBudget = 10;
   Label label_;
   NodeInfo info_;
-  SiblingList siblings_;
-  ZoneList<CharacterRange>* first_character_set_;
   // This variable keeps track of how many times code has been generated for
   // this node (in different traces).  We don't keep track of where the
   // generated code is located unless the code is generated at the start of
   // a trace, in which case it is generic and can be reused by flushing the
   // deferred operations in the current trace and generating a goto.
   int trace_count_;
+  BoyerMooreLookahead* bm_info_[2];
 };
 
 
@@ -833,8 +658,8 @@ class Interval {
     return (from_ <= value) && (value <= to_);
   }
   bool is_empty() { return from_ == kNone; }
-  int from() { return from_; }
-  int to() { return to_; }
+  int from() const { return from_; }
+  int to() const { return to_; }
   static Interval Empty() { return Interval(); }
   static const int kNone = -1;
  private:
@@ -849,10 +674,16 @@ class SeqRegExpNode: public RegExpNode {
       : on_success_(on_success) { }
   RegExpNode* on_success() { return on_success_; }
   void set_on_success(RegExpNode* node) { on_success_ = node; }
+  virtual RegExpNode* FilterASCII(int depth);
   virtual void FillInBMInfo(
       int offset, BoyerMooreLookahead* bm, bool not_at_start) {
     on_success_->FillInBMInfo(offset, bm, not_at_start);
+    if (offset == 0) set_bm_info(not_at_start, bm);
   }
+
+ protected:
+  RegExpNode* FilterSuccessor(int depth);
+
  private:
   RegExpNode* on_success_;
 };
@@ -904,8 +735,6 @@ class ActionNode: public SeqRegExpNode {
   Type type() { return type_; }
   // TODO(erikcorry): We should allow some action nodes in greedy loops.
   virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
-  virtual ActionNode* Clone() { return new ActionNode(*this); }
-  virtual int ComputeFirstCharacterSet(int budget);
 
  private:
   union {
@@ -972,13 +801,8 @@ class TextNode: public SeqRegExpNode {
       RegExpCompiler* compiler);
   virtual void FillInBMInfo(
       int offset, BoyerMooreLookahead* bm, bool not_at_start);
-  virtual TextNode* Clone() {
-    TextNode* result = new TextNode(*this);
-    result->CalculateOffsets();
-    return result;
-  }
   void CalculateOffsets();
-  virtual int ComputeFirstCharacterSet(int budget);
+  virtual RegExpNode* FilterASCII(int depth);
 
  private:
   enum TextEmitPassType {
@@ -1009,12 +833,7 @@ class AssertionNode: public SeqRegExpNode {
     AT_START,
     AT_BOUNDARY,
     AT_NON_BOUNDARY,
-    AFTER_NEWLINE,
-    // Types not directly expressible in regexp syntax.
-    // Used for modifying a boundary node if its following character is
-    // known to be word and/or non-word.
-    AFTER_NONWORD_CHARACTER,
-    AFTER_WORD_CHARACTER
+    AFTER_NEWLINE
   };
   static AssertionNode* AtEnd(RegExpNode* on_success) {
     return new AssertionNode(AT_END, on_success);
@@ -1042,12 +861,15 @@ class AssertionNode: public SeqRegExpNode {
                                     bool not_at_start);
   virtual void FillInBMInfo(
       int offset, BoyerMooreLookahead* bm, bool not_at_start);
-  virtual int ComputeFirstCharacterSet(int budget);
-  virtual AssertionNode* Clone() { return new AssertionNode(*this); }
   AssertionNodeType type() { return type_; }
   void set_type(AssertionNodeType type) { type_ = type; }
 
  private:
+  void EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace);
+  enum IfPrevious { kIsNonWord, kIsWord };
+  void BacktrackIfPrevious(RegExpCompiler* compiler,
+                           Trace* trace,
+                           IfPrevious backtrack_if_previous);
   AssertionNode(AssertionNodeType t, RegExpNode* on_success)
       : SeqRegExpNode(on_success), type_(t) { }
   AssertionNodeType type_;
@@ -1076,13 +898,7 @@ class BackReferenceNode: public SeqRegExpNode {
     return;
   }
   virtual void FillInBMInfo(
-      int offset, BoyerMooreLookahead* bm, bool not_at_start) {
-    // Working out the set of characters that a backreference can match is too
-    // hard, so we just say that any character can match.
-    bm->SetRest(offset);
-  }
-  virtual BackReferenceNode* Clone() { return new BackReferenceNode(*this); }
-  virtual int ComputeFirstCharacterSet(int budget);
+      int offset, BoyerMooreLookahead* bm, bool not_at_start);
 
  private:
   int start_reg_;
@@ -1111,7 +927,7 @@ class EndNode: public RegExpNode {
     // Returning 0 from EatsAtLeast should ensure we never get here.
     UNREACHABLE();
   }
-  virtual EndNode* Clone() { return new EndNode(*this); }
+
  private:
   Action action_;
 };
@@ -1198,13 +1014,13 @@ class ChoiceNode: public RegExpNode {
                                     bool not_at_start);
   virtual void FillInBMInfo(
       int offset, BoyerMooreLookahead* bm, bool not_at_start);
-  virtual ChoiceNode* Clone() { return new ChoiceNode(*this); }
 
   bool being_calculated() { return being_calculated_; }
   bool not_at_start() { return not_at_start_; }
   void set_not_at_start() { not_at_start_ = true; }
   void set_being_calculated(bool b) { being_calculated_ = b; }
   virtual bool try_to_emit_quick_check_for_alternative(int i) { return true; }
+  virtual RegExpNode* FilterASCII(int depth);
 
  protected:
   int GreedyLoopTextLengthForAlternative(GuardedAlternative* alternative);
@@ -1249,6 +1065,7 @@ class NegativeLookaheadChoiceNode: public ChoiceNode {
   virtual void FillInBMInfo(
       int offset, BoyerMooreLookahead* bm, bool not_at_start) {
     alternatives_->at(1).node()->FillInBMInfo(offset, bm, not_at_start);
+    if (offset == 0) set_bm_info(not_at_start, bm);
   }
   // For a negative lookahead we don't emit the quick check for the
   // alternative that is expected to fail.  This is because quick check code
@@ -1256,7 +1073,7 @@ class NegativeLookaheadChoiceNode: public ChoiceNode {
   // characters, but on a negative lookahead the negative branch did not take
   // part in that calculation (EatsAtLeast) so the assumptions don't hold.
   virtual bool try_to_emit_quick_check_for_alternative(int i) { return i != 0; }
-  virtual int ComputeFirstCharacterSet(int budget);
+  virtual RegExpNode* FilterASCII(int depth);
 };
 
 
@@ -1279,12 +1096,11 @@ class LoopChoiceNode: public ChoiceNode {
                                     bool not_at_start);
   virtual void FillInBMInfo(
       int offset, BoyerMooreLookahead* bm, bool not_at_start);
-  virtual int ComputeFirstCharacterSet(int budget);
-  virtual LoopChoiceNode* Clone() { return new LoopChoiceNode(*this); }
   RegExpNode* loop_node() { return loop_node_; }
   RegExpNode* continue_node() { return continue_node_; }
   bool body_can_be_zero_length() { return body_can_be_zero_length_; }
   virtual void Accept(NodeVisitor* visitor);
+  virtual RegExpNode* FilterASCII(int depth);
 
  private:
   // AddAlternative is made private for loop nodes because alternatives
@@ -1300,6 +1116,146 @@ class LoopChoiceNode: public ChoiceNode {
 };
 
 
+// Improve the speed at which we scan for an initial point where a non-anchored
+// regexp can match by using a Boyer-Moore-like table. This is done by
+// identifying non-greedy non-capturing loops in the nodes that eat any
+// character one at a time.  For example, in the middle of the regexp
+// /foo[\s\S]*?bar/ we find such a loop.  There is also such a loop implicitly
+// inserted at the start of any non-anchored regexp.
+//
+// When we have found such a loop we look ahead in the nodes to find the set of
+// characters that can come at given distances. For example, for the regexp
+// /.?foo/ we know that there are at least 3 characters ahead of us, and the
+// sets of characters that can occur are [any, [f, o], [o]]. We find a range in
+// the lookahead info where the set of characters is reasonably constrained. In
+// our example this is from index 1 to 2 (0 is not constrained). We can now
+// look 3 characters ahead and if we don't find one of [f, o] (the union of
+// [f, o] and [o]) then we can skip forwards by the range size (in this case 2).
+//
+// For Unicode input strings we do the same, but modulo 128.
+//
+// We also look at the first string fed to the regexp and use that to get a hint
+// of the character frequencies in the inputs. This affects the assessment of
+// whether the set of characters is 'reasonably constrained'.
+//
+// We also have another lookahead mechanism (called quick check in the code),
+// which uses a wide load of multiple characters followed by a mask and compare
+// to determine whether a match is possible at this point.
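
A minimal standalone sketch of the mask-and-compare idea behind the quick check, assuming a little-endian host and the hypothetical four-character ASCII pattern /f[ox]o./ (the helper name and mask values below are illustrative, not V8's emitted code):

    #include <cstdint>
    #include <cstring>

    // Could a match for the illustrative pattern /f[ox]o./ start at subject?
    // Bits that differ between 'o' (0x6F) and 'x' (0x78) are masked away, so
    // the test can give false positives but never false negatives.
    bool QuickCheck(const char* subject) {
      std::uint32_t chunk;
      std::memcpy(&chunk, subject, sizeof(chunk));  // wide load of 4 characters
      // Per byte, high to low (little-endian): '.' -> 0x00 (anything matches),
      // 'o' -> 0xFF, 'o'/'x' -> 0xE8 (their common bits), 'f' -> 0xFF.
      const std::uint32_t mask  = 0x00FFE8FF;
      const std::uint32_t value = 0x006F6866;  // 'o', 'o' & 0xE8 = 0x68, 'f'
      return (chunk & mask) == value;  // cheap "a match may start here" test
    }
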
+enum ContainedInLattice {
+  kNotYet = 0,
+  kLatticeIn = 1,
+  kLatticeOut = 2,
+  kLatticeUnknown = 3  // Can also mean both in and out.
+};
+
+
+inline ContainedInLattice Combine(ContainedInLattice a, ContainedInLattice b) {
+  return static_cast<ContainedInLattice>(a | b);
+}
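
As a quick self-contained illustration of how the lattice saturates under Combine (the enum is restated so the snippet compiles on its own):

    #include <cassert>

    enum ContainedInLattice {
      kNotYet = 0, kLatticeIn = 1, kLatticeOut = 2, kLatticeUnknown = 3
    };

    inline ContainedInLattice Combine(ContainedInLattice a,
                                      ContainedInLattice b) {
      return static_cast<ContainedInLattice>(a | b);
    }

    int main() {
      // Seeing only in-set characters keeps the lattice at kLatticeIn...
      assert(Combine(kNotYet, kLatticeIn) == kLatticeIn);           // 0|1 == 1
      // ...but mixing in- and out-of-set characters saturates to 'unknown'.
      assert(Combine(kLatticeIn, kLatticeOut) == kLatticeUnknown);  // 1|2 == 3
      return 0;
    }
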
+
+
+ContainedInLattice AddRange(ContainedInLattice a,
+                            const int* ranges,
+                            int ranges_size,
+                            Interval new_range);
+
+
+class BoyerMoorePositionInfo : public ZoneObject {
+ public:
+  BoyerMoorePositionInfo()
+      : map_(new ZoneList<bool>(kMapSize)),
+        map_count_(0),
+        w_(kNotYet),
+        s_(kNotYet),
+        d_(kNotYet),
+        surrogate_(kNotYet) {
+    for (int i = 0; i < kMapSize; i++) {
+      map_->Add(false);
+    }
+  }
+
+  bool& at(int i) { return map_->at(i); }
+
+  static const int kMapSize = 128;
+  static const int kMask = kMapSize - 1;
+
+  int map_count() const { return map_count_; }
+
+  void Set(int character);
+  void SetInterval(const Interval& interval);
+  void SetAll();
+  bool is_non_word() { return w_ == kLatticeOut; }
+  bool is_word() { return w_ == kLatticeIn; }
+
+ private:
+  ZoneList<bool>* map_;
+  int map_count_;  // Number of set bits in the map.
+  ContainedInLattice w_;  // The \w character class.
+  ContainedInLattice s_;  // The \s character class.
+  ContainedInLattice d_;  // The \d character class.
+  ContainedInLattice surrogate_;  // Surrogate UTF-16 code units.
+};
+
+
+class BoyerMooreLookahead : public ZoneObject {
+ public:
+  BoyerMooreLookahead(int length, RegExpCompiler* compiler);
+
+  int length() { return length_; }
+  int max_char() { return max_char_; }
+  RegExpCompiler* compiler() { return compiler_; }
+
+  int Count(int map_number) {
+    return bitmaps_->at(map_number)->map_count();
+  }
+
+  BoyerMoorePositionInfo* at(int i) { return bitmaps_->at(i); }
+
+  void Set(int map_number, int character) {
+    if (character > max_char_) return;
+    BoyerMoorePositionInfo* info = bitmaps_->at(map_number);
+    info->Set(character);
+  }
+
+  void SetInterval(int map_number, const Interval& interval) {
+    if (interval.from() > max_char_) return;
+    BoyerMoorePositionInfo* info = bitmaps_->at(map_number);
+    if (interval.to() > max_char_) {
+      info->SetInterval(Interval(interval.from(), max_char_));
+    } else {
+      info->SetInterval(interval);
+    }
+  }
+
+  void SetAll(int map_number) {
+    bitmaps_->at(map_number)->SetAll();
+  }
+
+  void SetRest(int from_map) {
+    for (int i = from_map; i < length_; i++) SetAll(i);
+  }
+  bool EmitSkipInstructions(RegExpMacroAssembler* masm);
+
+ private:
+  // This is the value obtained by EatsAtLeast.  If we do not have at least this
+  // many characters left in the sample string then the match is bound to fail.
+  // Therefore it is OK to read a character this far ahead of the current match
+  // point.
+  int length_;
+  RegExpCompiler* compiler_;
+  // 0x7f for ASCII, 0xffff for UTF-16.
+  int max_char_;
+  ZoneList<BoyerMoorePositionInfo*>* bitmaps_;
+
+  int GetSkipTable(int min_lookahead,
+                   int max_lookahead,
+                   Handle<ByteArray> boolean_skip_table);
+  bool FindWorthwhileInterval(int* from, int* to);
+  int FindBestInterval(
+    int max_number_of_chars, int old_biggest_points, int* from, int* to);
+};
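
A minimal sketch of how a 128-entry boolean skip table of the kind GetSkipTable() builds would be consumed; the function name, calling convention, and bounds handling here are hypothetical, since the real skip loop is emitted as machine code via EmitSkipInstructions():

    #include <string>

    // skip_table[c & 0x7f] is true iff character c can occur anywhere inside
    // the constrained lookahead window (cf. BoyerMoorePositionInfo::kMask).
    int ScanForCandidate(const std::string& subject,
                         const bool skip_table[128],
                         int max_lookahead, int skip_distance) {
      int pos = 0;
      while (pos + max_lookahead < static_cast<int>(subject.size())) {
        unsigned char c = subject[pos + max_lookahead];
        if (skip_table[c & 0x7f]) break;  // c could be part of a match here
        pos += skip_distance;  // c can't occur in the window: skip past it
      }
      return pos;  // candidate start; the real matcher verifies from here
    }
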
+
+
 // There are many ways to generate code for a node.  This class encapsulates
 // the current way we should be generating.  In other words it encapsulates
 // the current state of the code generator.  The effect of this is that we
index 4c09b0d..9d68b8c 100644
@@ -250,7 +250,7 @@ struct LazyInstance {
 
 
 template <typename T,
-          typename CreateTrait = DefaultConstructTrait<T>,
+          typename CreateTrait = DefaultCreateTrait<T>,
           typename InitOnceTrait = SingleThreadInitOnceTrait,
           typename DestroyTrait = LeakyInstanceTrait<T> >
 struct LazyDynamicInstance {
index 7c2c83f..6cf3bad 100644
@@ -137,6 +137,14 @@ bool List<T, P>::RemoveElement(const T& elm) {
 
 
 template<typename T, class P>
+void List<T, P>::Allocate(int length) {
+  DeleteData(data_);
+  Initialize(length);
+  length_ = length;
+}
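
A small self-contained sketch of the intended contract, with std::vector standing in for v8's List (the stand-in type is illustrative): Allocate discards the old contents and leaves the length equal to the request, whereas Clear leaves it at zero:

    #include <cassert>
    #include <vector>

    // Simplified stand-in for v8::internal::List, just to show the contract.
    struct SimpleList {
      std::vector<int> data;
      void Allocate(int length) { data.assign(length, 0); }  // old contents gone
      void Clear() { data.clear(); }                         // length becomes 0
      int length() const { return static_cast<int>(data.size()); }
    };

    int main() {
      SimpleList list;
      list.Allocate(8);
      assert(list.length() == 8);  // space for 8 elements, already counted
      list.Clear();
      assert(list.length() == 0);
      return 0;
    }
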
+
+
+template<typename T, class P>
 void List<T, P>::Clear() {
   DeleteData(data_);
   Initialize(0);
@@ -207,20 +215,19 @@ void List<T, P>::Initialize(int capacity) {
 }
 
 
-template <typename T>
-int SortedListBSearch(
-    const List<T>& list, T elem, int (*cmp)(const T* x, const T* y)) {
+template <typename T, typename P>
+int SortedListBSearch(const List<T>& list, P cmp) {
   int low = 0;
   int high = list.length() - 1;
   while (low <= high) {
     int mid = (low + high) / 2;
     T mid_elem = list[mid];
 
-    if (cmp(&mid_elem, &elem) > 0) {
+    if (cmp(&mid_elem) > 0) {
       high = mid - 1;
       continue;
     }
-    if (cmp(&mid_elem, &elem) < 0) {
+    if (cmp(&mid_elem) < 0) {
       low = mid + 1;
       continue;
     }
@@ -231,9 +238,21 @@ int SortedListBSearch(
 }
 
 
+template<typename T>
+class ElementCmp {
+ public:
+  explicit ElementCmp(T e) : elem_(e) {}
+  int operator()(const T* other) {
+    return PointerValueCompare(other, &elem_);
+  }
+ private:
+  T elem_;
+};
+
+
 template <typename T>
 int SortedListBSearch(const List<T>& list, T elem) {
-  return SortedListBSearch<T>(list, elem, PointerValueCompare<T>);
+  return SortedListBSearch<T, ElementCmp<T> > (list, ElementCmp<T>(elem));
 }
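
A standalone rendering of the new predicate-based interface, with std::vector standing in for v8::internal::List and a hand-rolled comparator analogous to ElementCmp (a sketch under those assumptions, not the library code):

    #include <cassert>
    #include <vector>

    // Same contract as the new SortedListBSearch: |cmp| compares its argument
    // against the element being searched for, returning +1 / -1 / 0.
    template <typename T, typename P>
    int SortedBSearch(const std::vector<T>& list, P cmp) {
      int low = 0;
      int high = static_cast<int>(list.size()) - 1;
      while (low <= high) {
        int mid = (low + high) / 2;
        const T& mid_elem = list[mid];
        if (cmp(&mid_elem) > 0) { high = mid - 1; continue; }
        if (cmp(&mid_elem) < 0) { low = mid + 1; continue; }
        return mid;
      }
      return -1;
    }

    struct IntCmp {  // plays the role of ElementCmp<int>
      explicit IntCmp(int e) : elem(e) {}
      int operator()(const int* other) const {
        return (*other > elem) - (*other < elem);
      }
      int elem;
    };

    int main() {
      std::vector<int> v;
      v.push_back(1); v.push_back(3); v.push_back(5); v.push_back(7);
      assert(SortedBSearch(v, IntCmp(5)) == 2);   // found at index 2
      assert(SortedBSearch(v, IntCmp(4)) == -1);  // not present
      return 0;
    }
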
 
 
index adddea4..7350c0d 100644
@@ -117,6 +117,9 @@ class List {
   // pointer type. Returns the removed element.
   INLINE(T RemoveLast()) { return Remove(length_ - 1); }
 
+  // Deletes current list contents and allocates space for 'length' elements.
+  INLINE(void Allocate(int length));
+
   // Clears the list by setting the length to zero. Even if T is a
   // pointer type, clearing the list doesn't delete the entries.
   INLINE(void Clear());
@@ -173,9 +176,11 @@ typedef List<Handle<Code> > CodeHandleList;
 
 // Perform binary search for an element in an already sorted
 // list. Returns the index of the element or -1 if it was not found.
-template <typename T>
-int SortedListBSearch(
-    const List<T>& list, T elem, int (*cmp)(const T* x, const T* y));
+// |cmp| is a predicate that takes a pointer to an element of the List
+// and returns +1 if it is greater than, -1 if it is less than, and 0 if
+// it is equal to the element being searched for.
+template <typename T, class P>
+int SortedListBSearch(const List<T>& list, P cmp);
 template <typename T>
 int SortedListBSearch(const List<T>& list, T elem);
 
index 4396c73..9534f9e 100644
@@ -958,7 +958,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
           }
         }
 
-        if (instr->IsMarkedAsCall() || instr->IsMarkedAsSaveDoubles()) {
+        if (instr->IsMarkedAsCall()) {
           for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
             if (output == NULL || !output->IsDoubleRegister() ||
                 output->index() != i) {
index abfb0f6..4463c93 100644
@@ -159,6 +159,11 @@ Debug.LiveEdit = new function() {
 
     preview_description.stack_modified = dropped_functions_number != 0;
 
+    // Our current implementation requires the client to manually issue a
+    // "step in" command to obtain a correct stack state.
+    preview_description.stack_update_needs_step_in =
+        preview_description.stack_modified;
+
     // Start with breakpoints. Convert their line/column positions and
     // temporary remove.
     var break_points_restorer = TemporaryRemoveBreakPoints(script, change_log);
index 9c5294a..22b8250 100644
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,7 @@
 
 #include "liveedit.h"
 
+#include "code-stubs.h"
 #include "compilation-cache.h"
 #include "compiler.h"
 #include "debug.h"
@@ -1475,26 +1476,36 @@ static const char* DropFrames(Vector<StackFrame*> frames,
   // Check the nature of the top frame.
   Isolate* isolate = Isolate::Current();
   Code* pre_top_frame_code = pre_top_frame->LookupCode();
+  bool frame_has_padding;
   if (pre_top_frame_code->is_inline_cache_stub() &&
       pre_top_frame_code->ic_state() == DEBUG_BREAK) {
     // OK, we can drop inline cache calls.
     *mode = Debug::FRAME_DROPPED_IN_IC_CALL;
+    frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
   } else if (pre_top_frame_code ==
              isolate->debug()->debug_break_slot()) {
     // OK, we can drop debug break slot.
     *mode = Debug::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
+    frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
   } else if (pre_top_frame_code ==
       isolate->builtins()->builtin(
           Builtins::kFrameDropper_LiveEdit)) {
     // OK, we can drop our own code.
     *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
+    frame_has_padding = false;
   } else if (pre_top_frame_code ==
       isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) {
     *mode = Debug::FRAME_DROPPED_IN_RETURN_CALL;
+    frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
   } else if (pre_top_frame_code->kind() == Code::STUB &&
-      pre_top_frame_code->major_key()) {
-    // Entry from our unit tests, it's fine, we support this case.
+      pre_top_frame_code->major_key() == CodeStub::CEntry) {
+    // Entry from our unit tests on a 'debugger' statement.
+    // It's fine, we support this case.
     *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
+    // We don't have padding from a 'debugger' statement call.
+    // Here the stub is CEntry; it's not debug-only and can't be padded.
+    // If anyone complains, a proxy padded stub could be added.
+    frame_has_padding = false;
   } else {
     return "Unknown structure of stack above changing function";
   }
@@ -1504,8 +1515,49 @@ static const char* DropFrames(Vector<StackFrame*> frames,
       - Debug::kFrameDropperFrameSize * kPointerSize  // Size of the new frame.
       + kPointerSize;  // Bigger address end is exclusive.
 
+  Address* top_frame_pc_address = top_frame->pc_address();
+
+  // top_frame may be damaged below this point. Do not use it.
+  ASSERT(!(top_frame = NULL));
+
   if (unused_stack_top > unused_stack_bottom) {
-    return "Not enough space for frame dropper frame";
+    if (frame_has_padding) {
+      int shortage_bytes =
+          static_cast<int>(unused_stack_top - unused_stack_bottom);
+
+      Address padding_start = pre_top_frame->fp() -
+          Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize;
+
+      Address padding_pointer = padding_start;
+      Smi* padding_object =
+          Smi::FromInt(Debug::FramePaddingLayout::kPaddingValue);
+      while (Memory::Object_at(padding_pointer) == padding_object) {
+        padding_pointer -= kPointerSize;
+      }
+      int padding_counter =
+          Smi::cast(Memory::Object_at(padding_pointer))->value();
+      if (padding_counter * kPointerSize < shortage_bytes) {
+        return "Not enough space for frame dropper frame "
+            "(even with padding frame)";
+      }
+      Memory::Object_at(padding_pointer) =
+          Smi::FromInt(padding_counter - shortage_bytes / kPointerSize);
+
+      StackFrame* pre_pre_frame = frames[top_frame_index - 2];
+
+      memmove(padding_start + kPointerSize - shortage_bytes,
+          padding_start + kPointerSize,
+          Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize);
+
+      pre_top_frame->UpdateFp(pre_top_frame->fp() - shortage_bytes);
+      pre_pre_frame->SetCallerFp(pre_top_frame->fp());
+      unused_stack_top -= shortage_bytes;
+
+      STATIC_ASSERT(sizeof(Address) == kPointerSize);
+      top_frame_pc_address -= shortage_bytes / kPointerSize;
+    } else {
+      return "Not enough space for frame dropper frame";
+    }
   }
 
   // Committing now. After this point we should return only NULL value.
@@ -1515,7 +1567,7 @@ static const char* DropFrames(Vector<StackFrame*> frames,
   ASSERT(!FixTryCatchHandler(pre_top_frame, bottom_js_frame));
 
   Handle<Code> code = Isolate::Current()->builtins()->FrameDropper_LiveEdit();
-  top_frame->set_pc(code->entry());
+  *top_frame_pc_address = code->entry();
   pre_top_frame->SetCallerFp(bottom_js_frame->fp());
 
   *restarter_frame_function_pointer =
index 93287ae..08fa82e 100644
@@ -196,6 +196,7 @@ macro SET_UTC_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 1));
 macro SET_LOCAL_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 0));
 
 # Last input and last subject of regexp matches.
+const LAST_SUBJECT_INDEX = 1;
 macro LAST_SUBJECT(array) = ((array)[1]);
 macro LAST_INPUT(array) = ((array)[2]);
 
@@ -204,6 +205,15 @@ macro CAPTURE(index) = (3 + (index));
 const CAPTURE0 = 3;
 const CAPTURE1 = 4;
 
+# For the regexp capture override array.  This has the same
+# format as the arguments to a function called from
+# String.prototype.replace.
+macro OVERRIDE_MATCH(override) = ((override)[0]);
+macro OVERRIDE_POS(override) = ((override)[(override).length - 2]);
+macro OVERRIDE_SUBJECT(override) = ((override)[(override).length - 1]);
+# 1-based, so an index of 1 returns the first capture.
+macro OVERRIDE_CAPTURE(override, index) = ((override)[(index)]);
+
 # PropertyDescriptor return value indices - must match
 # PropertyDescriptorIndices in runtime.cc.
 const IS_ACCESSOR_INDEX = 0;
index 43f6b89..2f7e31f 100644
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -52,6 +52,15 @@ void MarkCompactCollector::SetFlags(int flags) {
 }
 
 
+bool MarkCompactCollector::MarkObjectAndPush(HeapObject* obj) {
+  if (MarkObjectWithoutPush(obj)) {
+    marking_deque_.PushBlack(obj);
+    return true;
+  }
+  return false;
+}
+
+
 void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
   ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
   if (!mark_bit.Get()) {
@@ -62,16 +71,13 @@ void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
 }
 
 
-bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* object) {
-  MarkBit mark = Marking::MarkBitFrom(object);
-  bool old_mark = mark.Get();
-  if (!old_mark) SetMark(object, mark);
-  return old_mark;
-}
-
-
-void MarkCompactCollector::MarkObjectAndPush(HeapObject* object) {
-  if (!MarkObjectWithoutPush(object)) marking_deque_.PushBlack(object);
+bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* obj) {
+  MarkBit mark_bit = Marking::MarkBitFrom(obj);
+  if (!mark_bit.Get()) {
+    SetMark(obj, mark_bit);
+    return true;
+  }
+  return false;
 }
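
A toy rendering of the boolean contract these two inline functions now share (the types are hypothetical stand-ins for HeapObject and the marking deque): MarkObjectWithoutPush reports whether the object was still unmarked, so MarkObjectAndPush pushes each object at most once:

    #include <cstdio>
    #include <stack>

    struct Obj { bool marked; Obj() : marked(false) {} };  // stand-in object

    static std::stack<Obj*> marking_stack;  // stand-in for the marking deque

    // Returns true if the object needed marking (i.e. was unmarked before).
    bool MarkObjectWithoutPush(Obj* obj) {
      if (!obj->marked) {
        obj->marked = true;
        return true;
      }
      return false;
    }

    // Pushes only on the first visit, so each object is scanned at most once.
    bool MarkObjectAndPush(Obj* obj) {
      if (MarkObjectWithoutPush(obj)) {
        marking_stack.push(obj);
        return true;
      }
      return false;
    }

    int main() {
      Obj o;
      std::printf("%d\n", MarkObjectAndPush(&o));  // 1: freshly marked, pushed
      std::printf("%d\n", MarkObjectAndPush(&o));  // 0: already marked, no push
      return 0;
    }
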
 
 
index 866d359..c455564 100644
@@ -64,13 +64,13 @@ MarkCompactCollector::MarkCompactCollector() :  // NOLINT
       abort_incremental_marking_(false),
       compacting_(false),
       was_marked_incrementally_(false),
-      collect_maps_(FLAG_collect_maps),
       flush_monomorphic_ics_(false),
       tracer_(NULL),
       migration_slots_buffer_(NULL),
       heap_(NULL),
       code_flusher_(NULL),
-      encountered_weak_maps_(NULL) { }
+      encountered_weak_maps_(NULL),
+      marker_(this, this) { }
 
 
 #ifdef DEBUG
@@ -282,7 +282,7 @@ void MarkCompactCollector::CollectGarbage() {
   MarkLiveObjects();
   ASSERT(heap_->incremental_marking()->IsStopped());
 
-  if (collect_maps_) ClearNonLiveTransitions();
+  if (FLAG_collect_maps) ClearNonLiveTransitions();
 
   ClearWeakMaps();
 
@@ -294,9 +294,7 @@ void MarkCompactCollector::CollectGarbage() {
 
   SweepSpaces();
 
-  if (!collect_maps_) ReattachInitialMaps();
-
-  heap_->isolate()->inner_pointer_to_code_cache()->Flush();
+  if (!FLAG_collect_maps) ReattachInitialMaps();
 
   Finish();
 
@@ -337,6 +335,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
     MarkBit mark_bit = Marking::MarkBitFrom(obj);
     ASSERT(Marking::IsWhite(mark_bit));
+    ASSERT_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
   }
 }
 #endif
@@ -373,6 +372,7 @@ void MarkCompactCollector::ClearMarkbits() {
     MarkBit mark_bit = Marking::MarkBitFrom(obj);
     mark_bit.Clear();
     mark_bit.Next().Clear();
+    Page::FromAddress(obj->address())->ResetLiveBytes();
   }
 }
 
@@ -658,11 +658,6 @@ void MarkCompactCollector::AbortCompaction() {
 void MarkCompactCollector::Prepare(GCTracer* tracer) {
   was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
 
-  // Disable collection of maps if incremental marking is enabled.
-  // Map collection algorithm relies on a special map transition tree traversal
-  // order which is not implemented for incremental marking.
-  collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
-
   // Monomorphic ICs are preserved when possible, but need to be flushed
   // when they might be keeping a Context alive, or when the heap is about
   // to be serialized.
@@ -680,7 +675,6 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
 
   ASSERT(!FLAG_never_compact || !FLAG_always_compact);
 
-  if (collect_maps_) CreateBackPointers();
 #ifdef ENABLE_GDB_JIT_INTERFACE
   if (FLAG_gdbjit) {
     // If GDBJIT interface is active disable compaction.
@@ -1150,9 +1144,10 @@ class StaticMarkingVisitor : public StaticVisitorBase {
     JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
 
     // Enqueue weak map in linked list of encountered weak maps.
-    ASSERT(weak_map->next() == Smi::FromInt(0));
-    weak_map->set_next(collector->encountered_weak_maps());
-    collector->set_encountered_weak_maps(weak_map);
+    if (weak_map->next() == Smi::FromInt(0)) {
+      weak_map->set_next(collector->encountered_weak_maps());
+      collector->set_encountered_weak_maps(weak_map);
+    }
 
     // Skip visiting the backing hash table containing the mappings.
     int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
@@ -1168,9 +1163,15 @@ class StaticMarkingVisitor : public StaticVisitorBase {
         object_size);
 
     // Mark the backing hash table without pushing it on the marking stack.
-    ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
-    ASSERT(!MarkCompactCollector::IsMarked(table));
-    collector->SetMark(table, Marking::MarkBitFrom(table));
+    Object* table_object = weak_map->table();
+    if (!table_object->IsHashTable()) return;
+    ObjectHashTable* table = ObjectHashTable::cast(table_object);
+    Object** table_slot =
+        HeapObject::RawField(weak_map, JSWeakMap::kTableOffset);
+    MarkBit table_mark = Marking::MarkBitFrom(table);
+    collector->RecordSlot(table_slot, table_slot, table);
+    if (!table_mark.Get()) collector->SetMark(table, table_mark);
+    // Recording the map slot can be skipped, because maps are not compacted.
     collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
     ASSERT(MarkCompactCollector::IsMarked(table->map()));
   }
@@ -1179,16 +1180,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
     Heap* heap = map->GetHeap();
     Code* code = reinterpret_cast<Code*>(object);
     if (FLAG_cleanup_code_caches_at_gc) {
-      Object* raw_info = code->type_feedback_info();
-      if (raw_info->IsTypeFeedbackInfo()) {
-        TypeFeedbackCells* type_feedback_cells =
-            TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
-        for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
-          ASSERT(type_feedback_cells->AstId(i)->IsSmi());
-          JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
-          cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
-        }
-      }
+      code->ClearTypeFeedbackCells(heap);
     }
     code->CodeIterateBody<StaticMarkingVisitor>(heap);
   }
@@ -1390,6 +1382,12 @@ class StaticMarkingVisitor : public StaticVisitorBase {
 
   static void VisitSharedFunctionInfoAndFlushCode(Map* map,
                                                   HeapObject* object) {
+    Heap* heap = map->GetHeap();
+    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
+    if (shared->ic_age() != heap->global_ic_age()) {
+      shared->ResetForNewContext(heap->global_ic_age());
+    }
+
     MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
     if (!collector->is_code_flushing_enabled()) {
       VisitSharedFunctionInfoGeneric(map, object);
@@ -1406,10 +1404,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
 
     if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
 
-    if (shared->ic_age() != heap->global_ic_age()) {
-      shared->ResetForNewContext(heap->global_ic_age());
-    }
-
     if (!known_flush_code_candidate) {
       known_flush_code_candidate = IsFlushable(heap, shared);
       if (known_flush_code_candidate) {
@@ -1523,12 +1517,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
                              JSFunction::kCodeEntryOffset + kPointerSize),
         HeapObject::RawField(object,
                              JSFunction::kNonWeakFieldsEndOffset));
-
-    // Don't visit the next function list field as it is a weak reference.
-    Object** next_function =
-        HeapObject::RawField(object, JSFunction::kNextFunctionLinkOffset);
-    heap->mark_compact_collector()->RecordSlot(
-        next_function, next_function, *next_function);
   }
 
   static inline void VisitJSRegExpFields(Map* map,
@@ -1763,9 +1751,8 @@ class SymbolTableCleaner : public ObjectVisitor {
 
         // Since no objects have yet been moved we can safely access the map of
         // the object.
-        if (o->IsExternalString() ||
-            (o->IsHeapObject() && HeapObject::cast(o)->map()->has_external_resource())) {
-          heap_->FinalizeExternalString(HeapObject::cast(*p));
+        if (o->IsExternalString()) {
+          heap_->FinalizeExternalString(String::cast(*p));
         }
         // Set the entry to the_hole_value (as deleted).
         *p = heap_->the_hole_value();
@@ -1806,11 +1793,11 @@ void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
     heap_->ClearCacheOnMap(map);
 
     // When map collection is enabled we have to mark through map's transitions
-    // in a special way to make transition links weak.
-    // Only maps for subclasses of JSReceiver can have transitions.
+    // in a special way to make transition links weak. Only maps for subclasses
+    // of JSReceiver can have transitions.
     STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-    if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
-      MarkMapContents(map);
+    if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+      marker_.MarkMapContents(map);
     } else {
       marking_deque_.PushBlack(map);
     }
@@ -1820,79 +1807,86 @@ void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
 }
 
 
-void MarkCompactCollector::MarkMapContents(Map* map) {
+// Force explicit instantiation of the Marker templates.
+template void Marker<IncrementalMarking>::MarkMapContents(Map* map);
+template void Marker<MarkCompactCollector>::MarkMapContents(Map* map);
+
+
+template <class T>
+void Marker<T>::MarkMapContents(Map* map) {
   // Mark prototype transitions array but don't push it into marking stack.
   // This will make references from it weak. We will clean dead prototype
   // transitions in ClearNonLiveTransitions.
-  FixedArray* prototype_transitions = map->prototype_transitions();
-  MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
-  if (!mark.Get()) {
-    mark.Set();
-    MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
-                                          prototype_transitions->Size());
+  Object** proto_trans_slot =
+      HeapObject::RawField(map, Map::kPrototypeTransitionsOrBackPointerOffset);
+  HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot);
+  if (prototype_transitions->IsFixedArray()) {
+    mark_compact_collector()->RecordSlot(proto_trans_slot,
+                                         proto_trans_slot,
+                                         prototype_transitions);
+    MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
+    if (!mark.Get()) {
+      mark.Set();
+      MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
+                                            prototype_transitions->Size());
+    }
   }
 
-  Object** raw_descriptor_array_slot =
+  // Make sure that the back pointer stored either in the map itself or inside
+  // its prototype transitions array is marked. Treat pointers in the descriptor
+  // array as weak and also mark that array to prevent visiting it later.
+  base_marker()->MarkObjectAndPush(HeapObject::cast(map->GetBackPointer()));
+
+  Object** descriptor_array_slot =
       HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
-  Object* raw_descriptor_array = *raw_descriptor_array_slot;
-  if (!raw_descriptor_array->IsSmi()) {
-    MarkDescriptorArray(
-        reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
+  Object* descriptor_array = *descriptor_array_slot;
+  if (!descriptor_array->IsSmi()) {
+    MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(descriptor_array));
+  }
+
+  // Mark the Object* fields of the Map. Since the descriptor array has been
+  // marked already, it is fine that one of these fields contains a pointer
+  // to it. But make sure to skip back pointer and prototype transitions.
+  STATIC_ASSERT(Map::kPointerFieldsEndOffset ==
+      Map::kPrototypeTransitionsOrBackPointerOffset + kPointerSize);
+  Object** start_slot = HeapObject::RawField(
+      map, Map::kPointerFieldsBeginOffset);
+  Object** end_slot = HeapObject::RawField(
+      map, Map::kPrototypeTransitionsOrBackPointerOffset);
+  for (Object** slot = start_slot; slot < end_slot; slot++) {
+    Object* obj = *slot;
+    if (!obj->NonFailureIsHeapObject()) continue;
+    mark_compact_collector()->RecordSlot(start_slot, slot, obj);
+    base_marker()->MarkObjectAndPush(reinterpret_cast<HeapObject*>(obj));
   }
-
-  // Mark the Object* fields of the Map.
-  // Since the descriptor array has been marked already, it is fine
-  // that one of these fields contains a pointer to it.
-  Object** start_slot = HeapObject::RawField(map,
-                                             Map::kPointerFieldsBeginOffset);
-
-  Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
-
-  StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot);
-}
-
-
-void MarkCompactCollector::MarkAccessorPairSlot(HeapObject* accessors,
-                                                int offset) {
-  Object** slot = HeapObject::RawField(accessors, offset);
-  HeapObject* accessor = HeapObject::cast(*slot);
-  if (accessor->IsMap()) return;
-  RecordSlot(slot, slot, accessor);
-  MarkObjectAndPush(accessor);
 }
 
 
-void MarkCompactCollector::MarkDescriptorArray(
-    DescriptorArray* descriptors) {
-  MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
-  if (descriptors_mark.Get()) return;
+template <class T>
+void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) {
   // Empty descriptor array is marked as a root before any maps are marked.
-  ASSERT(descriptors != heap()->empty_descriptor_array());
-  SetMark(descriptors, descriptors_mark);
+  ASSERT(descriptors != descriptors->GetHeap()->empty_descriptor_array());
 
-  FixedArray* contents = reinterpret_cast<FixedArray*>(
+  // The DescriptorArray contains a pointer to its contents array, but the
+  // contents array will be marked black and hence not be visited again.
+  if (!base_marker()->MarkObjectAndPush(descriptors)) return;
+  FixedArray* contents = FixedArray::cast(
       descriptors->get(DescriptorArray::kContentArrayIndex));
-  ASSERT(contents->IsHeapObject());
-  ASSERT(!IsMarked(contents));
-  ASSERT(contents->IsFixedArray());
   ASSERT(contents->length() >= 2);
-  MarkBit contents_mark = Marking::MarkBitFrom(contents);
-  SetMark(contents, contents_mark);
-  // Contents contains (value, details) pairs.  If the details say that the type
-  // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
-  // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
-  // live.  Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
-  // CONSTANT_TRANSITION is the value an Object* (a Map*).
+  ASSERT(Marking::IsWhite(Marking::MarkBitFrom(contents)));
+  base_marker()->MarkObjectWithoutPush(contents);
+
+  // Contents contains (value, details) pairs.  If the descriptor contains a
+  // transition (value is a Map), we don't mark the value as live.  It might
+  // be set to the NULL_DESCRIPTOR in ClearNonLiveTransitions later.
   for (int i = 0; i < contents->length(); i += 2) {
-    // If the pair (value, details) at index i, i+1 is not
-    // a transition or null descriptor, mark the value.
     PropertyDetails details(Smi::cast(contents->get(i + 1)));
 
     Object** slot = contents->data_start() + i;
     if (!(*slot)->IsHeapObject()) continue;
     HeapObject* value = HeapObject::cast(*slot);
 
-    RecordSlot(slot, slot, *slot);
+    mark_compact_collector()->RecordSlot(slot, slot, *slot);
 
     switch (details.type()) {
       case NORMAL:
@@ -1900,21 +1894,22 @@ void MarkCompactCollector::MarkDescriptorArray(
       case CONSTANT_FUNCTION:
       case HANDLER:
       case INTERCEPTOR:
-        MarkObjectAndPush(value);
+        base_marker()->MarkObjectAndPush(value);
         break;
       case CALLBACKS:
         if (!value->IsAccessorPair()) {
-          MarkObjectAndPush(value);
-        } else if (!MarkObjectWithoutPush(value)) {
-          MarkAccessorPairSlot(value, AccessorPair::kGetterOffset);
-          MarkAccessorPairSlot(value, AccessorPair::kSetterOffset);
+          base_marker()->MarkObjectAndPush(value);
+        } else if (base_marker()->MarkObjectWithoutPush(value)) {
+          AccessorPair* accessors = AccessorPair::cast(value);
+          MarkAccessorPairSlot(accessors, AccessorPair::kGetterOffset);
+          MarkAccessorPairSlot(accessors, AccessorPair::kSetterOffset);
         }
         break;
       case ELEMENTS_TRANSITION:
         // For maps with multiple elements transitions, the transition maps are
         // stored in a FixedArray. Keep the fixed array alive but not the maps
         // that it refers to.
-        if (value->IsFixedArray()) MarkObjectWithoutPush(value);
+        if (value->IsFixedArray()) base_marker()->MarkObjectWithoutPush(value);
         break;
       case MAP_TRANSITION:
       case CONSTANT_TRANSITION:
@@ -1922,26 +1917,16 @@ void MarkCompactCollector::MarkDescriptorArray(
         break;
     }
   }
-  // The DescriptorArray descriptors contains a pointer to its contents array,
-  // but the contents array is already marked.
-  marking_deque_.PushBlack(descriptors);
 }
 
 
-void MarkCompactCollector::CreateBackPointers() {
-  HeapObjectIterator iterator(heap()->map_space());
-  for (HeapObject* next_object = iterator.Next();
-       next_object != NULL; next_object = iterator.Next()) {
-    if (next_object->IsMap()) {  // Could also be FreeSpace object on free list.
-      Map* map = Map::cast(next_object);
-      STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-      if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
-        map->CreateBackPointers();
-      } else {
-        ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
-      }
-    }
-  }
+template <class T>
+void Marker<T>::MarkAccessorPairSlot(AccessorPair* accessors, int offset) {
+  Object** slot = HeapObject::RawField(accessors, offset);
+  HeapObject* accessor = HeapObject::cast(*slot);
+  if (accessor->IsMap()) return;
+  mark_compact_collector()->RecordSlot(slot, slot, accessor);
+  base_marker()->MarkObjectAndPush(accessor);
 }
 
 
@@ -1975,6 +1960,7 @@ static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
 
 
 static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
+  ASSERT(!marking_deque->IsFull());
   ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
   ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
   ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
@@ -2467,15 +2453,8 @@ void MarkCompactCollector::ReattachInitialMaps() {
 void MarkCompactCollector::ClearNonLiveTransitions() {
   HeapObjectIterator map_iterator(heap()->map_space());
   // Iterate over the map space, setting map transitions that go from
-  // a marked map to an unmarked map to null transitions.  At the same time,
-  // set all the prototype fields of maps back to their original value,
-  // dropping the back pointers temporarily stored in the prototype field.
-  // Setting the prototype field requires following the linked list of
-  // back pointers, reversing them all at once.  This allows us to find
-  // those maps with map transitions that need to be nulled, and only
-  // scan the descriptor arrays of those maps, not all maps.
-  // All of these actions are carried out only on maps of JSObjects
-  // and related subtypes.
+  // a marked map to an unmarked map to null transitions.  This action
+  // is carried out only on maps of JSObjects and related subtypes.
   for (HeapObject* obj = map_iterator.Next();
        obj != NULL; obj = map_iterator.Next()) {
     Map* map = reinterpret_cast<Map*>(obj);
@@ -2551,36 +2530,16 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
 
 void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
                                                       MarkBit map_mark) {
-  // Follow the chain of back pointers to find the prototype.
-  Object* real_prototype = map;
-  while (real_prototype->IsMap()) {
-    real_prototype = Map::cast(real_prototype)->prototype();
-    ASSERT(real_prototype->IsHeapObject());
-  }
+  Object* potential_parent = map->GetBackPointer();
+  if (!potential_parent->IsMap()) return;
+  Map* parent = Map::cast(potential_parent);
 
-  // Follow back pointers, setting them to prototype, clearing map transitions
-  // when necessary.
-  Map* current = map;
+  // Follow the back pointer to check whether this is a map transition from
+  // a live map to a dead path and, if so, clear the parent's transitions.
   bool current_is_alive = map_mark.Get();
-  bool on_dead_path = !current_is_alive;
-  while (current->IsMap()) {
-    Object* next = current->prototype();
-    // There should never be a dead map above a live map.
-    ASSERT(on_dead_path || current_is_alive);
-
-    // A live map above a dead map indicates a dead transition. This test will
-    // always be false on the first iteration.
-    if (on_dead_path && current_is_alive) {
-      on_dead_path = false;
-      current->ClearNonLiveTransitions(heap(), real_prototype);
-    }
-
-    Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset);
-    *slot = real_prototype;
-    if (current_is_alive) RecordSlot(slot, slot, real_prototype);
-
-    current = reinterpret_cast<Map*>(next);
-    current_is_alive = Marking::MarkBitFrom(current).Get();
+  bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
+  if (!current_is_alive && parent_is_alive) {
+    parent->ClearNonLiveTransitions(heap());
   }
 }
 
@@ -2591,14 +2550,17 @@ void MarkCompactCollector::ProcessWeakMaps() {
     ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
     JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
     ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
+    Object** anchor = reinterpret_cast<Object**>(table->address());
     for (int i = 0; i < table->Capacity(); i++) {
       if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
-        Object* value = table->get(table->EntryToValueIndex(i));
-        StaticMarkingVisitor::VisitPointer(heap(), &value);
-        table->set_unchecked(heap(),
-                             table->EntryToValueIndex(i),
-                             value,
-                             UPDATE_WRITE_BARRIER);
+        Object** key_slot =
+            HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
+                ObjectHashTable::EntryToIndex(i)));
+        RecordSlot(anchor, key_slot, *key_slot);
+        Object** value_slot =
+            HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
+                ObjectHashTable::EntryToValueIndex(i)));
+        StaticMarkingVisitor::MarkObjectByPointer(this, anchor, value_slot);
       }
     }
     weak_map_obj = weak_map->next();
@@ -2781,15 +2743,15 @@ static void UpdatePointer(HeapObject** p, HeapObject* object) {
 }
 
 
-static HeapObject* UpdateReferenceInExternalStringTableEntry(Heap* heap,
-                                                             Object** p) {
+static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
+                                                         Object** p) {
   MapWord map_word = HeapObject::cast(*p)->map_word();
 
   if (map_word.IsForwardingAddress()) {
-    return HeapObject::cast(map_word.ToForwardingAddress());
+    return String::cast(map_word.ToForwardingAddress());
   }
 
-  return HeapObject::cast(*p);
+  return String::cast(*p);
 }
 
 
@@ -3418,6 +3380,8 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
   // under it.
   ProcessInvalidatedCode(&updating_visitor);
 
+  heap_->isolate()->inner_pointer_to_code_cache()->Flush();
+
 #ifdef DEBUG
   if (FLAG_verify_heap) {
     VerifyEvacuation(heap_);
@@ -3830,7 +3794,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   bool lazy_sweeping_active = false;
   bool unused_page_present = false;
 
-  intptr_t old_space_size = heap()->PromotedSpaceSize();
+  intptr_t old_space_size = heap()->PromotedSpaceSizeOfObjects();
   intptr_t space_left =
       Min(heap()->OldGenPromotionLimit(old_space_size),
           heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
index 66ffd19..dbc2869 100644
@@ -42,6 +42,7 @@ typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
 // Forward declarations.
 class CodeFlusher;
 class GCTracer;
+class MarkCompactCollector;
 class MarkingVisitor;
 class RootMarkingVisitor;
 
@@ -166,7 +167,6 @@ class Marking {
 
 // ----------------------------------------------------------------------------
 // Marking deque for tracing live objects.
-
 class MarkingDeque {
  public:
   MarkingDeque()
@@ -383,6 +383,34 @@ class SlotsBuffer {
 };
 
 
+// -------------------------------------------------------------------------
+// Marker shared between incremental and non-incremental marking
+template<class BaseMarker> class Marker {
+ public:
+  Marker(BaseMarker* base_marker, MarkCompactCollector* mark_compact_collector)
+      : base_marker_(base_marker),
+        mark_compact_collector_(mark_compact_collector) {}
+
+  // Mark pointers in a Map and its DescriptorArray together, possibly
+  // treating transitions or back pointers as weak.
+  void MarkMapContents(Map* map);
+  void MarkDescriptorArray(DescriptorArray* descriptors);
+  void MarkAccessorPairSlot(AccessorPair* accessors, int offset);
+
+ private:
+  BaseMarker* base_marker() {
+    return base_marker_;
+  }
+
+  MarkCompactCollector* mark_compact_collector() {
+    return mark_compact_collector_;
+  }
+
+  BaseMarker* base_marker_;
+  MarkCompactCollector* mark_compact_collector_;
+};
+
+
 // Defined in isolate.h.
 class ThreadLocalTop;
 
@@ -544,6 +572,8 @@ class MarkCompactCollector {
 
   void ClearMarkbits();
 
+  bool is_compacting() const { return compacting_; }
+
  private:
   MarkCompactCollector();
   ~MarkCompactCollector();
@@ -582,8 +612,6 @@ class MarkCompactCollector {
 
   bool was_marked_incrementally_;
 
-  bool collect_maps_;
-
   bool flush_monomorphic_ics_;
 
   // A pointer to the current stack-allocated GC tracer object during a full
@@ -606,12 +634,13 @@ class MarkCompactCollector {
   //
   //   After: Live objects are marked and non-live objects are unmarked.
 
-
   friend class RootMarkingVisitor;
   friend class MarkingVisitor;
   friend class StaticMarkingVisitor;
   friend class CodeMarkingVisitor;
   friend class SharedFunctionInfoMarkingVisitor;
+  friend class Marker<IncrementalMarking>;
+  friend class Marker<MarkCompactCollector>;
 
   // Mark non-optimize code for functions inlined into the given optimized
   // code. This will prevent it from being flushed.
@@ -629,29 +658,25 @@ class MarkCompactCollector {
   void AfterMarking();
 
   // Marks the object black and pushes it on the marking stack.
-  // This is for non-incremental marking.
+  // Returns true if the object needed marking and false otherwise.
+  // This is for non-incremental marking only.
+  INLINE(bool MarkObjectAndPush(HeapObject* obj));
+
+  // Marks the object black and pushes it on the marking stack.
+  // This is for non-incremental marking only.
   INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
 
-  INLINE(bool MarkObjectWithoutPush(HeapObject* object));
-  INLINE(void MarkObjectAndPush(HeapObject* value));
+  // Marks the object black without pushing it on the marking stack.
+  // Returns true if the object needed marking and false otherwise.
+  // This is for non-incremental marking only.
+  INLINE(bool MarkObjectWithoutPush(HeapObject* obj));
 
-  // Marks the object black.  This is for non-incremental marking.
+  // Marks the object black assuming that it is not yet marked.
+  // This is for non-incremental marking only.
   INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
 
   void ProcessNewlyMarkedObject(HeapObject* obj);
 
-  // Creates back pointers for all map transitions, stores them in
-  // the prototype field.  The original prototype pointers are restored
-  // in ClearNonLiveTransitions().  All JSObject maps
-  // connected by map transitions have the same prototype object, which
-  // is why we can use this field temporarily for back pointers.
-  void CreateBackPointers();
-
-  // Mark a Map and its DescriptorArray together, skipping transitions.
-  void MarkMapContents(Map* map);
-  void MarkAccessorPairSlot(HeapObject* accessors, int offset);
-  void MarkDescriptorArray(DescriptorArray* descriptors);
-
   // Mark the heap roots and all objects reachable from them.
   void MarkRoots(RootMarkingVisitor* visitor);
 
@@ -754,6 +779,7 @@ class MarkCompactCollector {
   MarkingDeque marking_deque_;
   CodeFlusher* code_flusher_;
   Object* encountered_weak_maps_;
+  Marker<MarkCompactCollector> marker_;
 
   List<Page*> evacuation_candidates_;
   List<Code*> invalidated_code_;
index 8e735c4..aee56af 100644
@@ -30,7 +30,6 @@
 // has the added benefit that the code in this file is isolated from
 // changes to these properties.
 var $floor = MathFloor;
-var $random = MathRandom;
 var $abs = MathAbs;
 
 // Instance class name can only be set on functions. That is the only
index a3adcf8..ab71936 100644
@@ -61,18 +61,21 @@ function FormatString(format, message) {
 
 
 // To check if something is a native error we need to check the
-// concrete native error types. It is not enough to check "obj
-// instanceof $Error" because user code can replace
-// NativeError.prototype.__proto__. User code cannot replace
-// NativeError.prototype though and therefore this is a safe test.
+// concrete native error types. It is not sufficient to use instanceof
+// since it is possible to create an object that has Error.prototype on
+// its prototype chain. This is the case for DOMException, for example.
 function IsNativeErrorObject(obj) {
-  return (obj instanceof $Error) ||
-      (obj instanceof $EvalError) ||
-      (obj instanceof $RangeError) ||
-      (obj instanceof $ReferenceError) ||
-      (obj instanceof $SyntaxError) ||
-      (obj instanceof $TypeError) ||
-      (obj instanceof $URIError);
+  switch (%_ClassOf(obj)) {
+    case 'Error':
+    case 'EvalError':
+    case 'RangeError':
+    case 'ReferenceError':
+    case 'SyntaxError':
+    case 'TypeError':
+    case 'URIError':
+      return true;
+  }
+  return false;
 }
 
 
@@ -745,7 +748,7 @@ function GetPositionInLine(message) {
 
 
 function GetStackTraceLine(recv, fun, pos, isGlobal) {
-  return FormatSourcePosition(new CallSite(recv, fun, pos));
+  return new CallSite(recv, fun, pos).toString();
 }
 
 // ----------------------------------------------------------------------------
@@ -919,6 +922,65 @@ function CallSiteIsConstructor() {
   return this.fun === constructor;
 }
 
+function CallSiteToString() {
+  var fileName;
+  var fileLocation = "";
+  if (this.isNative()) {
+    fileLocation = "native";
+  } else if (this.isEval()) {
+    fileName = this.getScriptNameOrSourceURL();
+    if (!fileName) {
+      fileLocation = this.getEvalOrigin();
+    }
+  } else {
+    fileName = this.getFileName();
+  }
+
+  if (fileName) {
+    fileLocation += fileName;
+    var lineNumber = this.getLineNumber();
+    if (lineNumber != null) {
+      fileLocation += ":" + lineNumber;
+      var columnNumber = this.getColumnNumber();
+      if (columnNumber) {
+        fileLocation += ":" + columnNumber;
+      }
+    }
+  }
+
+  if (!fileLocation) {
+    fileLocation = "unknown source";
+  }
+  var line = "";
+  var functionName = this.getFunction().name;
+  var addPrefix = true;
+  var isConstructor = this.isConstructor();
+  var isMethodCall = !(this.isToplevel() || isConstructor);
+  if (isMethodCall) {
+    var methodName = this.getMethodName();
+    line += this.getTypeName() + ".";
+    if (functionName) {
+      line += functionName;
+      if (methodName && (methodName != functionName)) {
+        line += " [as " + methodName + "]";
+      }
+    } else {
+      line += methodName || "<anonymous>";
+    }
+  } else if (isConstructor) {
+    line += "new " + (functionName || "<anonymous>");
+  } else if (functionName) {
+    line += functionName;
+  } else {
+    line += fileLocation;
+    addPrefix = false;
+  }
+  if (addPrefix) {
+    line += " (" + fileLocation + ")";
+  }
+  return line;
+}
+
 SetUpLockedPrototype(CallSite, $Array("receiver", "fun", "pos"), $Array(
   "getThis", CallSiteGetThis,
   "getTypeName", CallSiteGetTypeName,
@@ -934,7 +996,8 @@ SetUpLockedPrototype(CallSite, $Array("receiver", "fun", "pos"), $Array(
   "getColumnNumber", CallSiteGetColumnNumber,
   "isNative", CallSiteIsNative,
   "getPosition", CallSiteGetPosition,
-  "isConstructor", CallSiteIsConstructor
+  "isConstructor", CallSiteIsConstructor,
+  "toString", CallSiteToString
 ));
 
 
@@ -976,65 +1039,6 @@ function FormatEvalOrigin(script) {
   return eval_origin;
 }
 
-function FormatSourcePosition(frame) {
-  var fileName;
-  var fileLocation = "";
-  if (frame.isNative()) {
-    fileLocation = "native";
-  } else if (frame.isEval()) {
-    fileName = frame.getScriptNameOrSourceURL();
-    if (!fileName) {
-      fileLocation = frame.getEvalOrigin();
-    }
-  } else {
-    fileName = frame.getFileName();
-  }
-
-  if (fileName) {
-    fileLocation += fileName;
-    var lineNumber = frame.getLineNumber();
-    if (lineNumber != null) {
-      fileLocation += ":" + lineNumber;
-      var columnNumber = frame.getColumnNumber();
-      if (columnNumber) {
-        fileLocation += ":" + columnNumber;
-      }
-    }
-  }
-
-  if (!fileLocation) {
-    fileLocation = "unknown source";
-  }
-  var line = "";
-  var functionName = frame.getFunction().name;
-  var addPrefix = true;
-  var isConstructor = frame.isConstructor();
-  var isMethodCall = !(frame.isToplevel() || isConstructor);
-  if (isMethodCall) {
-    var methodName = frame.getMethodName();
-    line += frame.getTypeName() + ".";
-    if (functionName) {
-      line += functionName;
-      if (methodName && (methodName != functionName)) {
-        line += " [as " + methodName + "]";
-      }
-    } else {
-      line += methodName || "<anonymous>";
-    }
-  } else if (isConstructor) {
-    line += "new " + (functionName || "<anonymous>");
-  } else if (functionName) {
-    line += functionName;
-  } else {
-    line += fileLocation;
-    addPrefix = false;
-  }
-  if (addPrefix) {
-    line += " (" + fileLocation + ")";
-  }
-  return line;
-}
-
 function FormatStackTrace(error, frames) {
   var lines = [];
   try {
@@ -1050,7 +1054,7 @@ function FormatStackTrace(error, frames) {
     var frame = frames[i];
     var line;
     try {
-      line = FormatSourcePosition(frame);
+      line = frame.toString();
     } catch (e) {
       try {
         line = "<error: " + e + ">";
@@ -1125,13 +1129,7 @@ function SetUpError() {
     }
     %FunctionSetInstanceClassName(f, 'Error');
     %SetProperty(f.prototype, 'constructor', f, DONT_ENUM);
-    // The name property on the prototype of error objects is not
-    // specified as being read-only and dont-delete. However, allowing
-    // overwriting allows leaks of error objects between script blocks
-    // in the same context in a browser setting. Therefore we fix the
-    // name.
-    %SetProperty(f.prototype, "name", name,
-                 DONT_ENUM | DONT_DELETE | READ_ONLY)  ;
+    %SetProperty(f.prototype, "name", name, DONT_ENUM);
     %SetCode(f, function(m) {
       if (%_IsConstructCall()) {
         // Define all the expected properties directly on the error
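
With the name attribute on error prototypes relaxed to DONT_ENUM only, scripts
can now overwrite it; a short sketch of the observable difference, assuming
this build's semantics:

    // Previously READ_ONLY | DONT_DELETE: the assignment was silently ignored.
    Error.prototype.name = "Oops";
    String(new Error("x"));  // "Oops: x" after this change
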
@@ -1147,10 +1145,8 @@ function SetUpError() {
               return FormatMessage(%NewMessageObject(obj.type, obj.arguments));
           });
         } else if (!IS_UNDEFINED(m)) {
-          %IgnoreAttributesAndSetProperty(this,
-                                          'message',
-                                          ToString(m),
-                                          DONT_ENUM);
+          %IgnoreAttributesAndSetProperty(
+            this, 'message', ToString(m), DONT_ENUM);
         }
         captureStackTrace(this, f);
       } else {
@@ -1180,16 +1176,41 @@ $Error.captureStackTrace = captureStackTrace;
 var visited_errors = new InternalArray();
 var cyclic_error_marker = new $Object();
 
+function GetPropertyWithoutInvokingMonkeyGetters(error, name) {
+  // Climb the prototype chain until we find the holder.
+  while (error && !%HasLocalProperty(error, name)) {
+    error = error.__proto__;
+  }
+  if (error === null) return void 0;
+  if (!IS_OBJECT(error)) return error[name];
+  // If the property is an accessor on one of the predefined errors that can be
+  // generated statically by the compiler, don't touch it. This is to address
+  // http://code.google.com/p/chromium/issues/detail?id=69187
+  var desc = %GetOwnProperty(error, name);
+  if (desc && desc[IS_ACCESSOR_INDEX]) {
+    var isName = name === "name";
+    if (error === $ReferenceError.prototype)
+      return isName ? "ReferenceError" : void 0;
+    if (error === $SyntaxError.prototype)
+      return isName ? "SyntaxError" : void 0;
+    if (error === $TypeError.prototype)
+      return isName ? "TypeError" : void 0;
+  }
+  // Otherwise, read normally.
+  return error[name];
+}
+
 function ErrorToStringDetectCycle(error) {
   if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker;
   try {
-    var type = error.type;
-    var name = error.name;
+    var type = GetPropertyWithoutInvokingMonkeyGetters(error, "type");
+    var name = GetPropertyWithoutInvokingMonkeyGetters(error, "name");
     name = IS_UNDEFINED(name) ? "Error" : TO_STRING_INLINE(name);
-    var message = error.message;
+    var message = GetPropertyWithoutInvokingMonkeyGetters(error, "message");
     var hasMessage = %_CallFunction(error, "message", ObjectHasOwnProperty);
     if (type && !hasMessage) {
-      message = FormatMessage(%NewMessageObject(type, error.arguments));
+      var args = GetPropertyWithoutInvokingMonkeyGetters(error, "arguments");
+      message = FormatMessage(%NewMessageObject(type, args));
     }
     message = IS_UNDEFINED(message) ? "" : TO_STRING_INLINE(message);
     if (name === "") return message;
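
GetPropertyWithoutInvokingMonkeyGetters keeps engine-side stringification of
the statically generated errors out of reach of planted accessors (see the
Chromium issue cited above). A hedged illustration of the case it guards:

    // An accessor installed on a predefined error prototype...
    Object.defineProperty(ReferenceError.prototype, "name", {
      get: function() { return "Hijacked"; }
    });
    // ...is bypassed when the engine formats its own errors: the helper
    // returns the static "ReferenceError" for the name instead.
    try { missing_variable; } catch (e) { String(e); }
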
index fa64e1e..f347fdc 100644 (file)
@@ -2137,6 +2137,15 @@ Address Assembler::target_address_at(Address pc) {
 }
 
 
+// MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
+// qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
+// snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
+// OS::nan_value() returns a qNaN.
+void Assembler::QuietNaN(HeapObject* object) {
+  HeapNumber::cast(object)->set_value(OS::nan_value());
+}
+
+
 // On Mips, a target address is stored in a lui/ori instruction pair, each
 // of which loads 16 bits of the 32-bit address into a register.
 // Patching the address must replace both instructions and flush the i-cache.
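
The qNaN/sNaN distinction lives in the top mantissa bit of an IEEE-754 double,
and the two architectures read that bit with opposite meanings. A rough sketch
of inspecting it from script, assuming an engine with typed array support:

    var f64 = new Float64Array(1);
    var u32 = new Uint32Array(f64.buffer);   // little-endian word view
    f64[0] = NaN;                            // like OS::nan_value(): a qNaN
    var quietBit = (u32[1] >>> 19) & 1;      // bit 51 of the double; 1 on ia32
    // MIPS interprets the same pattern as a signaling NaN, which is why
    // QuietNaN() rewrites snapshot heap numbers with a native qNaN.
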
index 5e67d0c..84714e5 100644 (file)
@@ -570,6 +570,8 @@ class Assembler : public AssemblerBase {
 
   static void JumpLabelToJumpRegister(Address pc);
 
+  static void QuietNaN(HeapObject* nan);
+
   // This sets the branch destination (which gets loaded at the call address).
   // This is for calls and branches within generated code.  The serializer
   // has already deserialized the lui/ori instructions etc.
index e0ecded..f3dd95b 100644 (file)
@@ -171,11 +171,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
   __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
   __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
 
-  // Copy the qml global object from the surrounding context.
-  __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
-  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
-
-
   // Initialize the rest of the slots to undefined.
   __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
   for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
@@ -238,10 +233,6 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
   __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
   __ sw(a2, ContextOperand(v0, Context::GLOBAL_INDEX));
 
-  // Copy the qml global object from the surrounding context.
-  __ lw(a1, ContextOperand(cp, Context::QML_GLOBAL_INDEX));
-  __ sw(a1, ContextOperand(v0, Context::QML_GLOBAL_INDEX));
-
   // Initialize the rest of the slots to the hole value.
   __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
   for (int i = 0; i < slots_; i++) {
@@ -1716,46 +1707,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
 
   // NOTICE! This code is only reached after a smi-fast-case check, so
   // it is certain that at least one operand isn't a smi.
-  {
-    // This is optimized for reading the code and not benchmarked for
-    // speed or amount of instructions. The code is not ordered for speed
-    // or anything like this
-    Label miss, user_compare;
-
-    // No global compare if both operands are SMIs
-    __ And(a2, a1, Operand(a0));
-    __ JumpIfSmi(a2, &miss);
-
-
-    // We need to check if lhs and rhs are both objects, if not we are
-    // jumping out of the function. We will keep the 'map' in t0 (lhs) and
-    // t1 (rhs) for later usage.
-    __ GetObjectType(a0, t0, a3);
-    __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));
-
-    __ GetObjectType(a1, t1, a3);
-    __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));
-
-    // Check if the UseUserComparison flag is set by using the map of t0 for lhs
-    __ lbu(t0, FieldMemOperand(t0, Map::kBitField2Offset));
-    __ And(t0, t0, Operand(1 << Map::kUseUserObjectComparison));
-    __ Branch(&user_compare, eq, t0, Operand(1 << Map::kUseUserObjectComparison));
-
-
-    // Check if the UseUserComparison flag is _not_ set by using the map of t1 for
-    // rhs and then jump to the miss label.
-    __ lbu(t1, FieldMemOperand(t1, Map::kBitField2Offset));
-    __ And(t1, t1, Operand(1 << Map::kUseUserObjectComparison));
-    __ Branch(&miss, ne, t1, Operand(1 << Map::kUseUserObjectComparison));
-
-    // Invoke the runtime function here
-    __ bind(&user_compare);
-    __ Push(a0, a1);
-    __ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
-
-    // We exit here without doing anything
-    __ bind(&miss);
-  }
 
   // Handle the case where the objects are identical.  Either returns the answer
   // or goes to slow.  Only falls through if the objects were not identical.
@@ -5449,9 +5400,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
     __ Branch(&call, ne, t0, Operand(at));
     // Patch the receiver on the stack with the global receiver object.
-    __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
-    __ sw(a2, MemOperand(sp, argc_ * kPointerSize));
+    __ lw(a3, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+    __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalReceiverOffset));
+    __ sw(a3, MemOperand(sp, argc_ * kPointerSize));
     __ bind(&call);
   }
 
@@ -5459,8 +5410,12 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
   // a1: pushed function (to be verified)
   __ JumpIfSmi(a1, &non_function);
   // Get the map of the function object.
-  __ GetObjectType(a1, a2, a2);
-  __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
+  __ GetObjectType(a1, a3, a3);
+  __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
+
+  if (RecordCallTarget()) {
+    GenerateRecordCallTarget(masm);
+  }
 
   // Fast-case: Invoke the function now.
   // a1: pushed function
@@ -5485,8 +5440,17 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
 
   // Slow-case: Non-function called.
   __ bind(&slow);
+  if (RecordCallTarget()) {
+    // If there is a call target cache, mark it megamorphic in the
+    // non-function case.  MegamorphicSentinel is an immortal immovable
+    // object (undefined) so no write barrier is needed.
+    ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
+              masm->isolate()->heap()->undefined_value());
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+  }
   // Check for function proxy.
-  __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
   __ push(a1);  // Put proxy as additional argument.
   __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
   __ li(a2, Operand(0, RelocInfo::NONE));
@@ -6142,37 +6106,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
   // a2: result string length
   __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
   __ sra(t0, t0, 1);
+  // Return original string.
   __ Branch(&return_v0, eq, a2, Operand(t0));
-
-
-  Label result_longer_than_two;
-  // Check for special case of two character ASCII string, in which case
-  // we do a lookup in the symbol table first.
-  __ li(t0, 2);
-  __ Branch(&result_longer_than_two, gt, a2, Operand(t0));
-  __ Branch(&runtime, lt, a2, Operand(t0));
-
-  __ JumpIfInstanceTypeIsNotSequentialAscii(a1, a1, &runtime);
-
-  // Get the two characters forming the sub string.
-  __ Addu(v0, v0, Operand(a3));
-  __ lbu(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
-  __ lbu(t0, FieldMemOperand(v0, SeqAsciiString::kHeaderSize + 1));
-
-  // Try to lookup two character string in symbol table.
-  Label make_two_character_string;
-  StringHelper::GenerateTwoCharacterSymbolTableProbe(
-      masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
-  __ jmp(&return_v0);
-
-  // a2: result string length.
-  // a3: two characters combined into halfword in little endian byte order.
-  __ bind(&make_two_character_string);
-  __ AllocateAsciiString(v0, a2, t0, t1, t4, &runtime);
-  __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
-  __ jmp(&return_v0);
-
-  __ bind(&result_longer_than_two);
+  // Longer than original string's length or negative: unsafe arguments.
+  __ Branch(&runtime, hi, a2, Operand(t0));
+  // Shorter than original string's length: an actual substring.
 
   // Deal with different string types: update the index if necessary
   // and put the underlying string into t1.
@@ -7030,20 +6968,10 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
   __ And(a2, a1, Operand(a0));
   __ JumpIfSmi(a2, &miss);
 
-  // Compare lhs, a2 holds the map, a3 holds the type_reg
-  __ GetObjectType(a0, a2, a3);
-  __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));
-  __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
-  __ And(a2, a2, Operand(1 << Map::kUseUserObjectComparison));
-  __ Branch(&miss, eq, a2, Operand(1 << Map::kUseUserObjectComparison));
-
-
-  // Compare rhs, a2 holds the map, a3 holds the type_reg
-  __ GetObjectType(a1, a2, a3);
-  __ Branch(&miss, ne, a3, Operand(JS_OBJECT_TYPE));
-  __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
-  __ And(a2, a2, Operand(1 << Map::kUseUserObjectComparison));
-  __ Branch(&miss, eq, a2, Operand(1 << Map::kUseUserObjectComparison));
+  __ GetObjectType(a0, a2, a2);
+  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
 
   ASSERT(GetCondition() == eq);
   __ Ret(USE_DELAY_SLOT);
@@ -7060,18 +6988,8 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
   __ JumpIfSmi(a2, &miss);
   __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
   __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
-
-  // Check object in a0
   __ Branch(&miss, ne, a2, Operand(known_map_));
-  __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
-  __ And(a2, a2, Operand(1 << Map::kUseUserObjectComparison));
-  __ Branch(&miss, eq, a2, Operand(1 << Map::kUseUserObjectComparison));
-
-  // Check object in a1
   __ Branch(&miss, ne, a3, Operand(known_map_));
-  __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
-  __ And(a3, a3, Operand(1 << Map::kUseUserObjectComparison));
-  __ Branch(&miss, eq, a3, Operand(1 << Map::kUseUserObjectComparison));
 
   __ Ret(USE_DELAY_SLOT);
   __ subu(v0, a0, a1);
index fd04722..3d58571 100644 (file)
@@ -788,11 +788,6 @@ const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
 
 const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
 
-const int kDoubleAlignmentBits = 3;
-const int kDoubleAlignment = (1 << kDoubleAlignmentBits);
-const int kDoubleAlignmentMask = kDoubleAlignment - 1;
-
-
 } }   // namespace v8::internal
 
 #endif    // #ifndef V8_MIPS_CONSTANTS_H_
index 83f5f50..3be1e4d 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -116,6 +116,8 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
                      Assembler::kDebugBreakSlotInstructions);
 }
 
+const bool Debug::FramePaddingLayout::kIsSupported = false;
+
 
 #define __ ACCESS_MASM(masm)
 
index 51c2e46..62f3155 100644 (file)
@@ -447,6 +447,8 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
 
 void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
                                               int frame_index) {
+  Builtins* builtins = isolate_->builtins();
+  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
   JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
   unsigned height = iterator->Next();
   unsigned height_in_bytes = height * kPointerSize;
@@ -454,7 +456,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
     PrintF("  translating construct stub => height=%d\n", height_in_bytes);
   }
 
-  unsigned fixed_frame_size = 7 * kPointerSize;
+  unsigned fixed_frame_size = 8 * kPointerSize;
   unsigned output_frame_size = height_in_bytes + fixed_frame_size;
 
   // Allocate and store the output frame description.
@@ -519,6 +521,15 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
            top_address + output_offset, output_offset, value);
   }
 
+  // The output frame reflects a JSConstructStubGeneric frame.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(construct_stub);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; code object\n",
+           top_address + output_offset, output_offset, value);
+  }
+
   // Number of incoming arguments.
   output_offset -= kPointerSize;
   value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
@@ -549,8 +560,6 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
 
   ASSERT(0 == output_offset);
 
-  Builtins* builtins = isolate_->builtins();
-  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
   uint32_t pc = reinterpret_cast<uint32_t>(
       construct_stub->instruction_start() +
       isolate_->heap()->construct_stub_deopt_pc_offset()->value());
index 2dbb112..7be5056 100644 (file)
@@ -189,13 +189,12 @@ void FullCodeGenerator::Generate() {
 
   // Possibly allocate a local context.
   int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-  if (heap_slots > 0 ||
-      (scope()->is_qml_mode() && scope()->is_global_scope())) {
+  if (heap_slots > 0) {
     Comment cmnt(masm_, "[ Allocate local context");
     // Argument to NewContext is the function, which is in a1.
     __ push(a1);
     if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-      FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+      FastNewContextStub stub(heap_slots);
       __ CallStub(&stub);
     } else {
       __ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -276,11 +275,11 @@ void FullCodeGenerator::Generate() {
       // For named function expressions, declare the function name as a
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
-        VariableProxy* proxy = scope()->function();
-        ASSERT(proxy->var()->mode() == CONST ||
-               proxy->var()->mode() == CONST_HARMONY);
-        ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
-        EmitDeclaration(proxy, proxy->var()->mode(), NULL);
+        VariableDeclaration* function = scope()->function();
+        ASSERT(function->proxy()->var()->mode() == CONST ||
+               function->proxy()->var()->mode() == CONST_HARMONY);
+        ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+        VisitVariableDeclaration(function);
       }
       VisitDeclarations(scope()->declarations());
     }
@@ -790,64 +789,53 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
 }
 
 
-void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
-                                        VariableMode mode,
-                                        FunctionLiteral* function) {
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+  // The variable in the declaration always resides in the current function
+  // context.
+  ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+  if (FLAG_debug_code) {
+    // Check that we're not inside a with or catch context.
+    __ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
+    __ LoadRoot(t0, Heap::kWithContextMapRootIndex);
+    __ Check(ne, "Declaration in with context.",
+        a1, Operand(t0));
+    __ LoadRoot(t0, Heap::kCatchContextMapRootIndex);
+    __ Check(ne, "Declaration in catch context.",
+        a1, Operand(t0));
+  }
+}
+
+
+void FullCodeGenerator::VisitVariableDeclaration(
+    VariableDeclaration* declaration) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
+  VariableProxy* proxy = declaration->proxy();
+  VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
-  bool binding_needs_init = (function == NULL) &&
-      (mode == CONST || mode == CONST_HARMONY || mode == LET);
+  bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
   switch (variable->location()) {
     case Variable::UNALLOCATED:
-      ++global_count_;
+      globals_->Add(variable->name());
+      globals_->Add(variable->binding_needs_init()
+                        ? isolate()->factory()->the_hole_value()
+                        : isolate()->factory()->undefined_value());
       break;
 
     case Variable::PARAMETER:
     case Variable::LOCAL:
-      if (function != NULL) {
-        Comment cmnt(masm_, "[ Declaration");
-        VisitForAccumulatorValue(function);
-        __ sw(result_register(), StackOperand(variable));
-      } else if (binding_needs_init) {
-          Comment cmnt(masm_, "[ Declaration");
-          __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
-          __ sw(t0, StackOperand(variable));
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
+        __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+        __ sw(t0, StackOperand(variable));
       }
       break;
 
       case Variable::CONTEXT:
-      // The variable in the decl always resides in the current function
-      // context.
-      ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
-      if (FLAG_debug_code) {
-        // Check that we're not inside a with or catch context.
-        __ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
-        __ LoadRoot(t0, Heap::kWithContextMapRootIndex);
-        __ Check(ne, "Declaration in with context.",
-                 a1, Operand(t0));
-        __ LoadRoot(t0, Heap::kCatchContextMapRootIndex);
-        __ Check(ne, "Declaration in catch context.",
-                 a1, Operand(t0));
-      }
-      if (function != NULL) {
-        Comment cmnt(masm_, "[ Declaration");
-        VisitForAccumulatorValue(function);
-        __ sw(result_register(), ContextOperand(cp, variable->index()));
-        int offset = Context::SlotOffset(variable->index());
-        // We know that we have written a function, which is not a smi.
-        __ RecordWriteContextSlot(cp,
-                                  offset,
-                                  result_register(),
-                                  a2,
-                                  kRAHasBeenSaved,
-                                  kDontSaveFPRegs,
-                                  EMIT_REMEMBERED_SET,
-                                  OMIT_SMI_CHECK);
-        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      } else if (binding_needs_init) {
-          Comment cmnt(masm_, "[ Declaration");
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
+        EmitDebugCheckDeclarationContext(variable);
           __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
           __ sw(at, ContextOperand(cp, variable->index()));
           // No write barrier since the_hole_value is in old space.
@@ -856,13 +844,11 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
       break;
 
     case Variable::LOOKUP: {
-      Comment cmnt(masm_, "[ Declaration");
+      Comment cmnt(masm_, "[ VariableDeclaration");
       __ li(a2, Operand(variable->name()));
       // Declaration nodes are always introduced in one of four modes.
-      ASSERT(mode == VAR ||
-             mode == CONST ||
-             mode == CONST_HARMONY ||
-             mode == LET);
+      ASSERT(mode == VAR || mode == LET ||
+             mode == CONST || mode == CONST_HARMONY);
       PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
         ? READ_ONLY : NONE;
       __ li(a1, Operand(Smi::FromInt(attr)));
@@ -870,13 +856,9 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
       // Note: For variables we must not push an initial value (such as
       // 'undefined') because we may have a (legal) redeclaration and we
       // must not destroy the current value.
-      if (function != NULL) {
-        __ Push(cp, a2, a1);
-        // Push initial value for function declaration.
-        VisitForStackValue(function);
-      } else if (binding_needs_init) {
-          __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
-          __ Push(cp, a2, a1, a0);
+      if (hole_init) {
+        __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
+        __ Push(cp, a2, a1, a0);
       } else {
         ASSERT(Smi::FromInt(0) == 0);
         __ mov(a0, zero_reg);  // Smi::FromInt(0) indicates no initial value.
@@ -889,6 +871,122 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
 }
 
 
+void FullCodeGenerator::VisitFunctionDeclaration(
+    FunctionDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      globals_->Add(variable->name());
+      Handle<SharedFunctionInfo> function =
+          Compiler::BuildFunctionInfo(declaration->fun(), script());
+      // Check for stack-overflow exception.
+      if (function.is_null()) return SetStackOverflow();
+      globals_->Add(function);
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      VisitForAccumulatorValue(declaration->fun());
+      __ sw(result_register(), StackOperand(variable));
+      break;
+    }
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      VisitForAccumulatorValue(declaration->fun());
+      __ sw(result_register(), ContextOperand(cp, variable->index()));
+      int offset = Context::SlotOffset(variable->index());
+      // We know that we have written a function, which is not a smi.
+      __ RecordWriteContextSlot(cp,
+                                offset,
+                                result_register(),
+                                a2,
+                                kRAHasBeenSaved,
+                                kDontSaveFPRegs,
+                                EMIT_REMEMBERED_SET,
+                                OMIT_SMI_CHECK);
+      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      break;
+    }
+
+    case Variable::LOOKUP: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      __ li(a2, Operand(variable->name()));
+      __ li(a1, Operand(Smi::FromInt(NONE)));
+      __ Push(cp, a2, a1);
+      // Push initial value for function declaration.
+      VisitForStackValue(declaration->fun());
+      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+      break;
+    }
+  }
+}
+
+
+void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  Handle<JSModule> instance = declaration->module()->interface()->Instance();
+  ASSERT(!instance.is_null());
+
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      Comment cmnt(masm_, "[ ModuleDeclaration");
+      globals_->Add(variable->name());
+      globals_->Add(instance);
+      Visit(declaration->module());
+      break;
+    }
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ ModuleDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      __ li(a1, Operand(instance));
+      __ sw(a1, ContextOperand(cp, variable->index()));
+      Visit(declaration->module());
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::LOOKUP:
+      UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED:
+      // TODO(rossberg)
+      break;
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ ImportDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      // TODO(rossberg)
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::LOOKUP:
+      UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
+  // TODO(rossberg)
+}
+
+
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   // The context is the first argument.
@@ -1238,7 +1336,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
     __ bind(&fast);
   }
 
-  __ lw(a0, var->is_qml_global() ? QmlGlobalObjectOperand():GlobalObjectOperand());
+  __ lw(a0, GlobalObjectOperand());
   __ li(a2, Operand(var->name()));
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
       ? RelocInfo::CODE_TARGET
@@ -1325,7 +1423,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
       Comment cmnt(masm_, "Global variable");
       // Use inline caching. Variable name is passed in a2 and the global
       // object (receiver) in a0.
-      __ lw(a0, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+      __ lw(a0, GlobalObjectOperand());
       __ li(a2, Operand(var->name()));
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
       CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
@@ -2003,7 +2101,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
     // Global var, const, or let.
     __ mov(a0, result_register());
     __ li(a2, Operand(var->name()));
-    __ lw(a1, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+    __ lw(a1, GlobalObjectOperand());
     Handle<Code> ic = is_classic_mode()
         ? isolate()->builtins()->StoreIC_Initialize()
         : isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2290,6 +2388,18 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
+
+  // Record call targets in unoptimized code, but not in the snapshot.
+  if (!Serializer::enabled()) {
+    flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
+    Handle<Object> uninitialized =
+        TypeFeedbackCells::UninitializedSentinel(isolate());
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+    RecordTypeFeedbackCell(expr->id(), cell);
+    __ li(a2, Operand(cell));
+  }
+
   CallFunctionStub stub(arg_count, flags);
   __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ CallStub(&stub);
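
Unoptimized code now records the callee observed at each call site in a
JSGlobalPropertyCell. A schematic of the states such a feedback cell moves
through, in plain JS terms (terminology assumed, not from the patch):

    function callIt(f) { return f(); }  // one call site inside callIt
    callIt(function a() {});  // cell: uninitialized -> monomorphic (target a)
    callIt(function b() {});  // second target: cell flips to the megamorphic
                              // sentinel (undefined), as in the stub above
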
@@ -2321,12 +2431,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
   __ li(a1, Operand(Smi::FromInt(scope()->start_position())));
   __ push(a1);
 
-  // Push the qml mode flag.
-  __ li(a1, Operand(Smi::FromInt(is_qml_mode())));
-  __ push(a1);
-
   // Do the runtime call.
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
 }
 
 
@@ -2382,7 +2488,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
     context()->DropAndPlug(1, v0);
   } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
     // Push global object as receiver for the call IC.
-    __ lw(a0, proxy->var()->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+    __ lw(a0, GlobalObjectOperand());
     __ push(a0);
     EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
   } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -3394,104 +3500,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
 }
 
 
-void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 3);
-  VisitForStackValue(args->at(0));
-  VisitForStackValue(args->at(1));
-  VisitForStackValue(args->at(2));
-  Label done;
-  Label slow_case;
-  Register object = a0;
-  Register index1 = a1;
-  Register index2 = a2;
-  Register elements = a3;
-  Register scratch1 = t0;
-  Register scratch2 = t1;
-
-  __ lw(object, MemOperand(sp, 2 * kPointerSize));
-  // Fetch the map and check if array is in fast case.
-  // Check that object doesn't require security checks and
-  // has no indexed interceptor.
-  __ GetObjectType(object, scratch1, scratch2);
-  __ Branch(&slow_case, ne, scratch2, Operand(JS_ARRAY_TYPE));
-  // Map is now in scratch1.
-
-  __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
-  __ And(scratch2, scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
-  __ Branch(&slow_case, ne, scratch2, Operand(zero_reg));
-
-  // Check the object's elements are in fast case and writable.
-  __ lw(elements, FieldMemOperand(object, JSObject::kElementsOffset));
-  __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ LoadRoot(scratch2, Heap::kFixedArrayMapRootIndex);
-  __ Branch(&slow_case, ne, scratch1, Operand(scratch2));
-
-  // Check that both indices are smis.
-  __ lw(index1, MemOperand(sp, 1 * kPointerSize));
-  __ lw(index2, MemOperand(sp, 0));
-  __ JumpIfNotBothSmi(index1, index2, &slow_case);
-
-  // Check that both indices are valid.
-  Label not_hi;
-  __ lw(scratch1, FieldMemOperand(object, JSArray::kLengthOffset));
-  __ Branch(&slow_case, ls, scratch1, Operand(index1));
-  __ Branch(&not_hi, NegateCondition(hi), scratch1, Operand(index1));
-  __ Branch(&slow_case, ls, scratch1, Operand(index2));
-  __ bind(&not_hi);
-
-  // Bring the address of the elements into index1 and index2.
-  __ Addu(scratch1, elements,
-      Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(index1, index1, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(index1, scratch1, index1);
-  __ sll(index2, index2, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(index2, scratch1, index2);
-
-  // Swap elements.
-  __ lw(scratch1, MemOperand(index1, 0));
-  __ lw(scratch2, MemOperand(index2, 0));
-  __ sw(scratch1, MemOperand(index2, 0));
-  __ sw(scratch2, MemOperand(index1, 0));
-
-  Label no_remembered_set;
-  __ CheckPageFlag(elements,
-                   scratch1,
-                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                   ne,
-                   &no_remembered_set);
-  // Possible optimization: do a check that both values are Smis
-  // (or them and test against Smi mask).
-
-  // We are swapping two objects in an array and the incremental marker never
-  // pauses in the middle of scanning a single object.  Therefore the
-  // incremental marker is not disturbed, so we don't need to call the
-  // RecordWrite stub that notifies the incremental marker.
-  __ RememberedSetHelper(elements,
-                         index1,
-                         scratch2,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
-  __ RememberedSetHelper(elements,
-                         index2,
-                         scratch2,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
-
-  __ bind(&no_remembered_set);
-  // We are done. Drop elements from the stack, and return undefined.
-  __ Drop(3);
-  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
-  __ jmp(&done);
-
-  __ bind(&slow_case);
-  __ CallRuntime(Runtime::kSwapElements, 3);
-
-  __ bind(&done);
-  context()->Plug(v0);
-}
-
-
 void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
@@ -3921,7 +3929,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
         // but "delete this" is allowed.
         ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
         if (var->IsUnallocated()) {
-          __ lw(a2, var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+          __ lw(a2, GlobalObjectOperand());
           __ li(a1, Operand(var->name()));
           __ li(a0, Operand(Smi::FromInt(kNonStrictMode)));
           __ Push(a2, a1, a0);
@@ -4227,7 +4235,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
   VariableProxy* proxy = expr->AsVariableProxy();
   if (proxy != NULL && proxy->var()->IsUnallocated()) {
     Comment cmnt(masm_, "Global variable");
-    __ lw(a0, proxy->var()->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+    __ lw(a0, GlobalObjectOperand());
     __ li(a2, Operand(proxy->name()));
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
@@ -4489,7 +4497,8 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
 
 void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
   Scope* declaration_scope = scope()->DeclarationScope();
-  if (declaration_scope->is_global_scope()) {
+  if (declaration_scope->is_global_scope() ||
+      declaration_scope->is_module_scope()) {
     // Contexts nested in the global context have a canonical empty function
     // as their closure, not the anonymous closure containing the global
     // code.  Pass a smi sentinel and let the runtime look up the empty
index 2c4da1a..964a7e2 100644 (file)
@@ -767,7 +767,7 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
   __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));
 
   // Check that the key is a positive smi.
-  __ And(scratch1, key, Operand(0x8000001));
+  __ And(scratch1, key, Operand(0x80000001));
   __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
 
   // Load the elements into scratch1 and check its map.
@@ -1688,12 +1688,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
 
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address());
+    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
   }
 }
 
 
-void PatchInlinedSmiCode(Address address) {
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   Address andi_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
 
@@ -1727,33 +1727,30 @@ void PatchInlinedSmiCode(Address address) {
   Instr instr_at_patch = Assembler::instr_at(patch_address);
   Instr branch_instr =
       Assembler::instr_at(patch_address + Instruction::kInstrSize);
-  ASSERT(Assembler::IsAndImmediate(instr_at_patch));
-  ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
+  // This is patching a conditional "jump if not smi/jump if smi" site.
+  // It is enabled by changing from
+  //   andi at, rx, 0
+  //   Branch <target>, eq, at, Operand(zero_reg)
+  // to:
+  //   andi at, rx, #kSmiTagMask
+  //   Branch <target>, ne, at, Operand(zero_reg)
+  // and vice versa to disable it again.
+  CodePatcher patcher(patch_address, 2);
+  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+  if (check == ENABLE_INLINED_SMI_CHECK) {
+    ASSERT(Assembler::IsAndImmediate(instr_at_patch));
+    ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
+    patcher.masm()->andi(at, reg, kSmiTagMask);
+  } else {
+    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
+    ASSERT(Assembler::IsAndImmediate(instr_at_patch));
+    patcher.masm()->andi(at, reg, 0);
+  }
   ASSERT(Assembler::IsBranch(branch_instr));
   if (Assembler::IsBeq(branch_instr)) {
-    // This is patching a "jump if not smi" site to be active.
-    // Changing:
-    //   andi at, rx, 0
-    //   Branch <target>, eq, at, Operand(zero_reg)
-    // to:
-    //   andi at, rx, #kSmiTagMask
-    //   Branch <target>, ne, at, Operand(zero_reg)
-    CodePatcher patcher(patch_address, 2);
-    Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
-    patcher.masm()->andi(at, reg, kSmiTagMask);
     patcher.ChangeBranchCondition(ne);
   } else {
     ASSERT(Assembler::IsBne(branch_instr));
-    // This is patching a "jump if smi" site to be active.
-    // Changing:
-    //   andi at, rx, 0
-    //   Branch <target>, ne, at, Operand(zero_reg)
-    // to:
-    //   andi at, rx, #kSmiTagMask
-    //   Branch <target>, eq, at, Operand(zero_reg)
-    CodePatcher patcher(patch_address, 2);
-    Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
-    patcher.masm()->andi(at, reg, kSmiTagMask);
     patcher.ChangeBranchCondition(eq);
   }
 }
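
The patched andi/branch pair tests V8's pointer tagging: small integers (smis)
carry a clear low bit, heap object pointers a set one. A schematic model of
the check, assuming the 32-bit smi layout (kSmiTagMask == 1):

    function isSmiWord(taggedWord) {
      return (taggedWord & 1) === 0;  // what "andi at, rx, kSmiTagMask" tests
    }
    isSmiWord(5 << 1);        // true: the smi encoding of 5
    isSmiWord((5 << 1) | 1);  // false: tagged like a heap object pointer
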
index 18f6a85..a4de721 100644 (file)
@@ -172,13 +172,12 @@ bool LCodeGen::GeneratePrologue() {
 
   // Possibly allocate a local context.
   int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-  if (heap_slots > 0 ||
-      (scope()->is_qml_mode() && scope()->is_global_scope())) {
+  if (heap_slots > 0) {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is in a1.
     __ push(a1);
     if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-      FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+      FastNewContextStub stub(heap_slots);
       __ CallStub(&stub);
     } else {
       __ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -2140,8 +2139,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                   RelocInfo::CODE_TARGET,
                   instr,
                   RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-  ASSERT(instr->HasDeoptimizationEnvironment());
-  LEnvironment* env = instr->deoptimization_environment();
+  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   // Put the result value into the result register slot and
   // restore all registers.
@@ -2345,40 +2343,37 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
   Register object = ToRegister(instr->object());
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
+
   int map_count = instr->hydrogen()->types()->length();
+  bool need_generic = instr->hydrogen()->need_generic();
+
+  if (map_count == 0 && !need_generic) {
+    DeoptimizeIf(al, instr->environment());
+    return;
+  }
   Handle<String> name = instr->hydrogen()->name();
-  if (map_count == 0) {
-    ASSERT(instr->hydrogen()->need_generic());
-    __ li(a2, Operand(name));
-    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    Label done;
-    __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
-    for (int i = 0; i < map_count - 1; ++i) {
-      Handle<Map> map = instr->hydrogen()->types()->at(i);
+  Label done;
+  __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  for (int i = 0; i < map_count; ++i) {
+    bool last = (i == map_count - 1);
+    Handle<Map> map = instr->hydrogen()->types()->at(i);
+    if (last && !need_generic) {
+      DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+    } else {
       Label next;
       __ Branch(&next, ne, scratch, Operand(map));
       EmitLoadFieldOrConstantFunction(result, object, map, name);
       __ Branch(&done);
       __ bind(&next);
     }
-    Handle<Map> map = instr->hydrogen()->types()->last();
-    if (instr->hydrogen()->need_generic()) {
-      Label generic;
-      __ Branch(&generic, ne, scratch, Operand(map));
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-      __ Branch(&done);
-      __ bind(&generic);
-      __ li(a2, Operand(name));
-      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      CallCode(ic, RelocInfo::CODE_TARGET, instr);
-    } else {
-      DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-    }
-    __ bind(&done);
   }
+  if (need_generic) {
+    __ li(a2, Operand(name));
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  }
+  __ bind(&done);
 }
 
 
@@ -2652,16 +2647,20 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   Register temp = scratch1();
   Register result = ToRegister(instr->result());
 
-  // Check if the calling frame is an arguments adaptor frame.
-  Label done, adapted;
-  __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
-  __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // Result is the frame pointer for the frame if not adapted and for the real
-  // frame below the adaptor frame if adapted.
-  __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
-  __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
+  if (instr->hydrogen()->from_inlined()) {
+    __ Subu(result, sp, 2 * kPointerSize);
+  } else {
+    // Check if the calling frame is an arguments adaptor frame.
+    Label done, adapted;
+    __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+    __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+    // Result is the frame pointer for the frame if not adapted and for the real
+    // frame below the adaptor frame if adapted.
+    __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
+    __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
+  }
 }
 
 
@@ -2769,7 +2768,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   __ sll(scratch, length, 2);
 
   __ bind(&invoke);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
@@ -2794,6 +2793,11 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
 }
 
 
+void LCodeGen::DoDrop(LDrop* instr) {
+  __ Drop(instr->count());
+}
+
+
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
   __ LoadHeapObject(result, instr->hydrogen()->closure());
@@ -2825,7 +2829,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
 
 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
   Register result = ToRegister(instr->result());
-  __ lw(result, ContextOperand(cp, instr->qml_global()?Context::QML_GLOBAL_INDEX:Context::GLOBAL_INDEX));
+  __ lw(result, ContextOperand(cp, Context::GLOBAL_INDEX));
 }
 
 
@@ -2839,7 +2843,8 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int arity,
                                  LInstruction* instr,
-                                 CallKind call_kind) {
+                                 CallKind call_kind,
+                                 A1State a1_state) {
   bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
       function->shared()->formal_parameter_count() == arity;
 
@@ -2847,7 +2852,10 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   RecordPosition(pointers->position());
 
   if (can_invoke_directly) {
-    __ LoadHeapObject(a1, function);
+    if (a1_state == A1_UNINITIALIZED) {
+      __ LoadHeapObject(a1, function);
+    }
+
     // Change context if needed.
     bool change_context =
         (info()->closure()->context() != function->context()) ||
@@ -2884,7 +2892,11 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(v0));
   __ mov(a0, v0);
-  CallKnownFunction(instr->function(), instr->arity(), instr, CALL_AS_METHOD);
+  CallKnownFunction(instr->function(),
+                    instr->arity(),
+                    instr,
+                    CALL_AS_METHOD,
+                    A1_UNINITIALIZED);
 }
 
 
@@ -3210,7 +3222,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
   // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
   __ And(a3, a1, Operand(0xFFFF));
   __ li(t0, Operand(18273));
-  __ mul(a3, a3, t0);
+  __ Mul(a3, a3, t0);
   __ srl(a1, a1, 16);
   __ Addu(a1, a3, a1);
   // Save state[0].
@@ -3219,7 +3231,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
   // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
   __ And(a3, a0, Operand(0xFFFF));
   __ li(t0, Operand(36969));
-  __ mul(a3, a3, t0);
+  __ Mul(a3, a3, t0);
   __ srl(a0, a0, 16),
   __ Addu(a0, a3, a0);
   // Save state[1].
@@ -3320,13 +3332,21 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   ASSERT(ToRegister(instr->function()).is(a1));
   ASSERT(instr->HasPointerMap());
-  ASSERT(instr->HasDeoptimizationEnvironment());
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
-  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-  ParameterCount count(instr->arity());
-  __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  if (instr->known_function().is_null()) {
+    LPointerMap* pointers = instr->pointer_map();
+    RecordPosition(pointers->position());
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(instr->arity());
+    __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  } else {
+    CallKnownFunction(instr->known_function(),
+                      instr->arity(),
+                      instr,
+                      CALL_AS_METHOD,
+                      A1_CONTAINS_TARGET);
+  }
 }
 
 
@@ -3381,7 +3401,11 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(v0));
-  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
+  CallKnownFunction(instr->target(),
+                    instr->arity(),
+                    instr,
+                    CALL_AS_FUNCTION,
+                    A1_UNINITIALIZED);
 }
 
 
@@ -3537,14 +3561,16 @@ void LCodeGen::DoStoreKeyedFastDoubleElement(
             Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
   }
 
-  Label is_nan;
-  // Check for NaN. All NaNs must be canonicalized.
-  __ BranchF(NULL, &is_nan, eq, value, value);
-  __ Branch(&not_nan);
+  if (instr->NeedsCanonicalization()) {
+    Label is_nan;
+    // Check for NaN. All NaNs must be canonicalized.
+    __ BranchF(NULL, &is_nan, eq, value, value);
+    __ Branch(&not_nan);
 
-  // Only load canonical NaN if the comparison above set the overflow.
-  __ bind(&is_nan);
-  __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+    // Only load canonical NaN if the comparison above set the overflow.
+    __ bind(&is_nan);
+    __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+  }
 
   __ bind(&not_nan);
   __ sdc1(value, MemOperand(scratch));
@@ -4545,9 +4571,10 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
         __ sw(a2, FieldMemOperand(result, total_offset + 4));
       }
     } else if (elements->IsFixedArray()) {
+      Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
       for (int i = 0; i < elements_length; i++) {
         int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
-        Handle<Object> value = JSObject::GetElement(object, i);
+        Handle<Object> value(fast_elements->get(i));
         if (value->IsJSObject()) {
           Handle<JSObject> value_object = Handle<JSObject>::cast(value);
           __ Addu(a2, result, Operand(*offset));
@@ -4571,6 +4598,23 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
 
 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
   int size = instr->hydrogen()->total_size();
+  ElementsKind boilerplate_elements_kind =
+      instr->hydrogen()->boilerplate()->GetElementsKind();
+
+  // Deopt if the literal boilerplate ElementsKind is of a type different than
+  // the expected one. The check isn't necessary if the boilerplate has already
+  // been converted to FAST_ELEMENTS.
+  if (boilerplate_elements_kind != FAST_ELEMENTS) {
+    __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
+    // Load map into a2.
+    __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+    // Load the map's "bit field 2".
+    __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
+    // Retrieve elements_kind from bit field 2.
+    __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
+    DeoptimizeIf(ne, instr->environment(), a2,
+        Operand(boilerplate_elements_kind));
+  }
 
   // Allocate all objects that are part of the literal in one big
   // allocation. This avoids multiple limit checks.
@@ -4906,7 +4950,7 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
   Register strict = scratch0();
   __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
   __ Push(object, key, strict);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
@@ -4919,7 +4963,7 @@ void LCodeGen::DoIn(LIn* instr) {
   Register obj = ToRegister(instr->object());
   Register key = ToRegister(instr->key());
   __ Push(key, obj);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
index b508256..94bb945 100644 (file)
@@ -212,12 +212,18 @@ class LCodeGen BASE_EMBEDDED {
                                int argc,
                                LInstruction* instr);
 
+  enum A1State {
+    A1_UNINITIALIZED,
+    A1_CONTAINS_TARGET
+  };
+
   // Generate a direct call to a known function.  Expects the function
   // to be in a1.
   void CallKnownFunction(Handle<JSFunction> function,
                          int arity,
                          LInstruction* instr,
-                         CallKind call_kind);
+                         CallKind call_kind,
+                         A1State a1_state);
 
   void LoadHeapObject(Register result, Handle<HeapObject> object);
 
index 48b4ca2..1eb3ab7 100644 (file)
@@ -108,22 +108,17 @@ void LInstruction::PrintTo(StringStream* stream) {
 }
 
 
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+void LInstruction::PrintDataTo(StringStream* stream) {
   stream->Add("= ");
-  for (int i = 0; i < inputs_.length(); i++) {
+  for (int i = 0; i < InputCount(); i++) {
     if (i > 0) stream->Add(" ");
-    inputs_[i]->PrintTo(stream);
+    InputAt(i)->PrintTo(stream);
   }
 }
 
 
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
-  for (int i = 0; i < results_.length(); i++) {
-    if (i > 0) stream->Add(" ");
-    results_[i]->PrintTo(stream);
-  }
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
 }
 
 
@@ -732,22 +727,6 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
 }
 
 
-LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
-    LInstruction* instr, int ast_id) {
-  ASSERT(instruction_pending_deoptimization_environment_ == NULL);
-  ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
-  instruction_pending_deoptimization_environment_ = instr;
-  pending_deoptimization_ast_id_ = ast_id;
-  return instr;
-}
-
-
-void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
-  instruction_pending_deoptimization_environment_ = NULL;
-  pending_deoptimization_ast_id_ = AstNode::kNoNumber;
-}
-
-
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
@@ -760,8 +739,10 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
   if (hinstr->HasObservableSideEffects()) {
     ASSERT(hinstr->next()->IsSimulate());
     HSimulate* sim = HSimulate::cast(hinstr->next());
-    instr = SetInstructionPendingDeoptimizationEnvironment(
-        instr, sim->ast_id());
+    ASSERT(instruction_pending_deoptimization_environment_ == NULL);
+    ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+    instruction_pending_deoptimization_environment_ = instr;
+    pending_deoptimization_ast_id_ = sim->ast_id();
   }
 
   // If instruction does not have side-effects lazy deoptimization
@@ -779,12 +760,6 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
 }
 
 
-LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
-  instr->MarkAsSaveDoubles();
-  return instr;
-}
-
-
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
   ASSERT(!instr->HasPointerMap());
   instr->set_pointer_map(new(zone()) LPointerMap(position_));
@@ -1150,7 +1125,7 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
 
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LGlobalObject(context, instr->qml_global()));
+  return DefineAsRegister(new(zone()) LGlobalObject(context));
 }
 
 
@@ -1225,7 +1200,7 @@ LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
 
 LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(instr->qml_global()), v0), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, v0), instr);
 }
 
 
@@ -1296,6 +1271,7 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
 LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
   ASSERT(instr->value()->representation().IsInteger32());
   ASSERT(instr->representation().IsInteger32());
+  if (instr->HasNoUses()) return NULL;
   LOperand* value = UseRegisterAtStart(instr->value());
   return DefineAsRegister(new(zone()) LBitNotI(value));
 }
@@ -1320,6 +1296,12 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
 }
 
 
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
 LInstruction* LChunkBuilder::DoMod(HMod* instr) {
   if (instr->representation().IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
@@ -2247,9 +2229,12 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
   if (pending_deoptimization_ast_id_ == instr->ast_id()) {
     LInstruction* result = new(zone()) LLazyBailout;
     result = AssignEnvironment(result);
+    // Store the lazy deopt environment with the instruction if needed. Right
+    // now it is only used for LInstanceOfKnownGlobal.
     instruction_pending_deoptimization_environment_->
-        set_deoptimization_environment(result->environment());
-    ClearInstructionPendingDeoptimizationEnvironment();
+        SetDeferredLazyDeoptimizationEnvironment(result->environment());
+    instruction_pending_deoptimization_environment_ = NULL;
+    pending_deoptimization_ast_id_ = AstNode::kNoNumber;
     return result;
   }
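With the helper pair gone, the hand-off reads directly at its two ends: MarkAsCall records the side-effecting instruction together with the ast id of the HSimulate that must follow it, and DoSimulate (above) delivers the lazy-bailout environment to that instruction and clears the pending state inline. A toy model of the protocol (not V8 code; names mine):

#include <cassert>

struct Env { int ast_id; };

struct LInstr {
  Env* lazy_deopt_env;
  LInstr() : lazy_deopt_env(0) {}
  void SetDeferredLazyDeoptimizationEnvironment(Env* e) { lazy_deopt_env = e; }
};

struct Builder {
  LInstr* pending;
  int pending_ast_id;
  Builder() : pending(0), pending_ast_id(-1) {}

  void MarkAsCall(LInstr* instr, int simulate_ast_id) {
    assert(pending == 0);  // at most one pending environment at a time
    pending = instr;
    pending_ast_id = simulate_ast_id;
  }

  void DoSimulate(Env* env) {
    if (pending != 0 && pending_ast_id == env->ast_id) {
      pending->SetDeferredLazyDeoptimizationEnvironment(env);
      pending = 0;  // cleared inline, as the diff now does
      pending_ast_id = -1;
    }
  }
};

int main() {
  Builder b;
  LInstr instance_of;  // stands in for LInstanceOfKnownGlobal
  Env env = {7};
  b.MarkAsCall(&instance_of, 7);
  b.DoSimulate(&env);
  assert(instance_of.lazy_deopt_env == &env);
}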
 
@@ -2276,8 +2261,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
                                                undefined,
                                                instr->call_kind(),
                                                instr->is_construct());
-  if (instr->arguments() != NULL) {
-    inner->Bind(instr->arguments(), graph()->GetArgumentsObject());
+  if (instr->arguments_var() != NULL) {
+    inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
   }
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
@@ -2286,10 +2271,21 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
 
 
 LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  LInstruction* pop = NULL;
+
+  HEnvironment* env = current_block_->last_environment();
+
+  if (instr->arguments_pushed()) {
+    int argument_count = env->arguments_environment()->parameter_count();
+    pop = new(zone()) LDrop(argument_count);
+    argument_count_ -= argument_count;
+  }
+
   HEnvironment* outer = current_block_->last_environment()->
       DiscardInlined(false);
   current_block_->UpdateEnvironment(outer);
-  return NULL;
+
+  return pop;
 }
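When the inlined call pushed its arguments, leaving the inlined function now emits an LDrop (declared in the header hunk below) that pops those slots without reading them. A standalone stand-in for the run-time effect (mine, not V8 code):

#include <cassert>
#include <vector>

struct Stack {
  std::vector<int> slots;
  void Push(int v) { slots.push_back(v); }
  void Drop(int count) {  // LDrop(count): adjust sp without reading the slots
    assert(count <= static_cast<int>(slots.size()));
    slots.resize(slots.size() - count);
  }
};

int main() {
  Stack s;
  s.Push(1); s.Push(2); s.Push(3);  // arguments pushed for the inlined call
  s.Drop(2);  // DoLeaveInlined emits Drop(parameter count of the arguments env)
  assert(s.slots.size() == 1);
}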
 
 
index e1fec8b..a04b429 100644 (file)
@@ -179,7 +179,8 @@ class LCodeGen;
   V(CheckMapValue)                              \
   V(LoadFieldByIndex)                           \
   V(DateField)                                  \
-  V(WrapReceiver)
+  V(WrapReceiver)                               \
+  V(Drop)
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
   virtual Opcode opcode() const { return LInstruction::k##type; } \
@@ -202,15 +203,14 @@ class LInstruction: public ZoneObject {
   LInstruction()
       :  environment_(NULL),
          hydrogen_value_(NULL),
-         is_call_(false),
-         is_save_doubles_(false) { }
+         is_call_(false) { }
   virtual ~LInstruction() { }
 
   virtual void CompileToNative(LCodeGen* generator) = 0;
   virtual const char* Mnemonic() const = 0;
   virtual void PrintTo(StringStream* stream);
-  virtual void PrintDataTo(StringStream* stream) = 0;
-  virtual void PrintOutputOperandTo(StringStream* stream) = 0;
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
 
   enum Opcode {
     // Declare a unique enum value for each instruction.
@@ -245,22 +245,12 @@ class LInstruction: public ZoneObject {
   void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
   HValue* hydrogen_value() const { return hydrogen_value_; }
 
-  void set_deoptimization_environment(LEnvironment* env) {
-    deoptimization_environment_.set(env);
-  }
-  LEnvironment* deoptimization_environment() const {
-    return deoptimization_environment_.get();
-  }
-  bool HasDeoptimizationEnvironment() const {
-    return deoptimization_environment_.is_set();
-  }
+  virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
 
   void MarkAsCall() { is_call_ = true; }
-  void MarkAsSaveDoubles() { is_save_doubles_ = true; }
 
   // Interface to the register allocator and iterators.
   bool IsMarkedAsCall() const { return is_call_; }
-  bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
 
   virtual bool HasResult() const = 0;
   virtual LOperand* result() = 0;
@@ -281,7 +271,6 @@ class LInstruction: public ZoneObject {
   LEnvironment* environment_;
   SetOncePointer<LPointerMap> pointer_map_;
   HValue* hydrogen_value_;
-  SetOncePointer<LEnvironment> deoptimization_environment_;
   bool is_call_;
   bool is_save_doubles_;
 };
@@ -305,9 +294,6 @@ class LTemplateInstruction: public LInstruction {
   int TempCount() { return T; }
   LOperand* TempAt(int i) { return temps_[i]; }
 
-  virtual void PrintDataTo(StringStream* stream);
-  virtual void PrintOutputOperandTo(StringStream* stream);
-
  protected:
   EmbeddedContainer<LOperand*, R> results_;
   EmbeddedContainer<LOperand*, I> inputs_;
@@ -533,9 +519,8 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
 
 class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
  public:
-  LArgumentsElements() { }
-
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
 };
 
 
@@ -833,6 +818,15 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
   DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
 
   Handle<JSFunction> function() const { return hydrogen()->function(); }
+  LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
+    return lazy_deopt_env_;
+  }
+  virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
+    lazy_deopt_env_ = env;
+  }
+
+ private:
+  LEnvironment* lazy_deopt_env_;
 };
 
 
@@ -1358,6 +1352,19 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
 };
 
 
+class LDrop: public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LDrop(int count) : count_(count) { }
+
+  int count() const { return count_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+  int count_;
+};
+
+
 class LThisFunction: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
@@ -1392,17 +1399,13 @@ class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
 
 class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
  public:
-  explicit LGlobalObject(LOperand* context, bool qml_global) {
+  explicit LGlobalObject(LOperand* context) {
     inputs_[0] = context;
-    qml_global_ = qml_global;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
 
   LOperand* context() { return InputAt(0); }
-  bool qml_global() { return qml_global_; }
- private:
-  bool qml_global_;
 };
 
 
@@ -1444,6 +1447,7 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
   virtual void PrintDataTo(StringStream* stream);
 
   int arity() const { return hydrogen()->argument_count() - 1; }
+  Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
 };
 
 
@@ -1494,16 +1498,10 @@ class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
   DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
 
-  explicit LCallGlobal(bool qml_global) : qml_global_(qml_global) {}
-
   virtual void PrintDataTo(StringStream* stream);
 
   Handle<String> name() const {return hydrogen()->name(); }
   int arity() const { return hydrogen()->argument_count() - 1; }
-
-  bool qml_global() { return qml_global_; }
- private:
-  bool qml_global_;
 };
 
 
@@ -1729,6 +1727,8 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
+
+  bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
 };
 
 
@@ -2359,11 +2359,6 @@ class LChunkBuilder BASE_EMBEDDED {
       LInstruction* instr,
       HInstruction* hinstr,
       CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-  LInstruction* MarkAsSaveDoubles(LInstruction* instr);
-
-  LInstruction* SetInstructionPendingDeoptimizationEnvironment(
-      LInstruction* instr, int ast_id);
-  void ClearInstructionPendingDeoptimizationEnvironment();
 
   LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
                                   int* argument_index_accumulator);
index e93a417..2c2445b 100644 (file)
@@ -5378,7 +5378,7 @@ CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
       size_(instructions * Assembler::kInstrSize),
-      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
+      masm_(NULL, address, size_ + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
index b5897e4..f57418f 100644 (file)
@@ -112,11 +112,6 @@ inline MemOperand GlobalObjectOperand()  {
 }
 
 
-static inline MemOperand QmlGlobalObjectOperand()  {
-  return ContextOperand(cp, Context::QML_GLOBAL_INDEX);
-}
-
-
 // Generate a MemOperand for loading a field from an object.
 inline MemOperand FieldMemOperand(Register object, int offset) {
   return MemOperand(object, offset - kHeapObjectTag);
index dde4a65..c48bcc4 100644 (file)
@@ -158,7 +158,7 @@ int RegExpMacroAssemblerMIPS::stack_limit_slack()  {
 void RegExpMacroAssemblerMIPS::AdvanceCurrentPosition(int by) {
   if (by != 0) {
     __ Addu(current_input_offset(),
-           current_input_offset(), Operand(by * char_size()));
+            current_input_offset(), Operand(by * char_size()));
   }
 }
 
@@ -229,9 +229,9 @@ void RegExpMacroAssemblerMIPS::CheckCharacterLT(uc16 limit, Label* on_less) {
 
 
 void RegExpMacroAssemblerMIPS::CheckCharacters(Vector<const uc16> str,
-                                              int cp_offset,
-                                              Label* on_failure,
-                                              bool check_end_of_string) {
+                                               int cp_offset,
+                                               Label* on_failure,
+                                               bool check_end_of_string) {
   if (on_failure == NULL) {
     // Instead of inlining a backtrack for each test, (re)use the global
     // backtrack target.
@@ -452,24 +452,26 @@ void RegExpMacroAssemblerMIPS::CheckNotRegistersEqual(int reg1,
 
 
 void RegExpMacroAssemblerMIPS::CheckNotCharacter(uint32_t c,
-                                                Label* on_not_equal) {
+                                                 Label* on_not_equal) {
   BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c));
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckCharacterAfterAnd(uint32_t c,
-                                                     uint32_t mask,
-                                                     Label* on_equal) {
+                                                      uint32_t mask,
+                                                      Label* on_equal) {
   __ And(a0, current_character(), Operand(mask));
-  BranchOrBacktrack(on_equal, eq, a0, Operand(c));
+  Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c);
+  BranchOrBacktrack(on_equal, eq, a0, rhs);
 }
 
 
 void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterAnd(uint32_t c,
-                                                        uint32_t mask,
-                                                        Label* on_not_equal) {
+                                                         uint32_t mask,
+                                                         Label* on_not_equal) {
   __ And(a0, current_character(), Operand(mask));
-  BranchOrBacktrack(on_not_equal, ne, a0, Operand(c));
+  Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c);
+  BranchOrBacktrack(on_not_equal, ne, a0, rhs);
 }
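Both And-then-branch stubs now special-case a zero comparand: MIPS has a hardwired zero register, so comparing against it avoids materializing the constant into a scratch register first. The operand choice in isolation (a trivial model; names mine):

#include <cstdio>
#include <stdint.h>

enum RhsKind { ZERO_REG, IMMEDIATE };

// Pick the cheaper right-hand side for the branch, as the stubs above do:
// zero_reg costs nothing, an immediate needs an extra load.
RhsKind ChooseRhs(uint32_t c) { return c == 0 ? ZERO_REG : IMMEDIATE; }

int main() {
  std::printf("%d %d\n", ChooseRhs(0), ChooseRhs(0x61));  // prints "0 1"
}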
 
 
@@ -478,7 +480,10 @@ void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterMinusAnd(
     uc16 minus,
     uc16 mask,
     Label* on_not_equal) {
-  UNIMPLEMENTED_MIPS();
+  ASSERT(minus < String::kMaxUtf16CodeUnit);
+  __ Subu(a0, current_character(), Operand(minus));
+  __ And(a0, a0, Operand(mask));
+  BranchOrBacktrack(on_not_equal, ne, a0, Operand(c));
 }
 
 
@@ -519,7 +524,7 @@ void RegExpMacroAssemblerMIPS::CheckBitInTable(
 
 
 bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
-                                                         Label* on_no_match) {
+                                                          Label* on_no_match) {
   // Range checks (c in min..max) are generally implemented by an unsigned
   // (c - min) <= (max - min) check.
   switch (type) {
@@ -884,23 +889,23 @@ void RegExpMacroAssemblerMIPS::GoTo(Label* to) {
 
 
 void RegExpMacroAssemblerMIPS::IfRegisterGE(int reg,
-                                           int comparand,
-                                           Label* if_ge) {
+                                            int comparand,
+                                            Label* if_ge) {
   __ lw(a0, register_location(reg));
   BranchOrBacktrack(if_ge, ge, a0, Operand(comparand));
 }
 
 
 void RegExpMacroAssemblerMIPS::IfRegisterLT(int reg,
-                                           int comparand,
-                                           Label* if_lt) {
+                                            int comparand,
+                                            Label* if_lt) {
   __ lw(a0, register_location(reg));
   BranchOrBacktrack(if_lt, lt, a0, Operand(comparand));
 }
 
 
 void RegExpMacroAssemblerMIPS::IfRegisterEqPos(int reg,
-                                              Label* if_eq) {
+                                               Label* if_eq) {
   __ lw(a0, register_location(reg));
   BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset()));
 }
@@ -913,9 +918,9 @@ RegExpMacroAssembler::IrregexpImplementation
 
 
 void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(int cp_offset,
-                                                   Label* on_end_of_input,
-                                                   bool check_bounds,
-                                                   int characters) {
+                                                    Label* on_end_of_input,
+                                                    bool check_bounds,
+                                                    int characters) {
   ASSERT(cp_offset >= -1);      // ^ and \b can look behind one character.
   ASSERT(cp_offset < (1<<30));  // Be sane! (And ensure negation works).
   if (check_bounds) {
@@ -966,7 +971,7 @@ void RegExpMacroAssemblerMIPS::PushCurrentPosition() {
 
 
 void RegExpMacroAssemblerMIPS::PushRegister(int register_index,
-                                           StackCheckFlag check_stack_limit) {
+                                            StackCheckFlag check_stack_limit) {
   __ lw(a0, register_location(register_index));
   Push(a0);
   if (check_stack_limit) CheckStackLimit();
@@ -1013,7 +1018,7 @@ void RegExpMacroAssemblerMIPS::Succeed() {
 
 
 void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
-                                                             int cp_offset) {
+                                                              int cp_offset) {
   if (cp_offset == 0) {
     __ sw(current_input_offset(), register_location(reg));
   } else {
@@ -1170,7 +1175,7 @@ MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
 
 
 void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
-                                            Label* on_outside_input) {
+                                             Label* on_outside_input) {
   BranchOrBacktrack(on_outside_input,
                     ge,
                     current_input_offset(),
@@ -1198,8 +1203,10 @@ void RegExpMacroAssemblerMIPS::BranchOrBacktrack(Label* to,
 }
 
 
-void RegExpMacroAssemblerMIPS::SafeCall(Label* to, Condition cond, Register rs,
-                                           const Operand& rt) {
+void RegExpMacroAssemblerMIPS::SafeCall(Label* to,
+                                        Condition cond,
+                                        Register rs,
+                                        const Operand& rt) {
   __ BranchAndLink(to, cond, rs, rt);
 }
 
@@ -1270,7 +1277,7 @@ void RegExpMacroAssemblerMIPS::CallCFunctionUsingStub(
 
 
 void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
-                                                            int characters) {
+                                                             int characters) {
   Register offset = current_input_offset();
   if (cp_offset != 0) {
     __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
index ec63fb6..18a5f5f 100644 (file)
@@ -429,8 +429,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
   // a0 : value.
   Label exit;
   // Check that the map of the object hasn't changed.
+  CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
+                                             : REQUIRE_EXACT_MAP;
   __ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
-              DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+              DO_SMI_CHECK, mode);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
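The fix above keys the map-check mode off whether the store performs a map transition: a transitioning store was compiled against one exact source map and must not run for a map that merely shares its structure, while an in-place store may still accept maps that differ only in elements kind. The decision in isolation (a sketch; names follow the diff):

#include <cassert>

enum CompareMapMode { ALLOW_ELEMENT_TRANSITION_MAPS, REQUIRE_EXACT_MAP };

CompareMapMode ModeFor(bool transition_is_null) {
  // No transition: an in-place field store tolerates elements-kind siblings.
  // With a transition: the new map was computed from one exact old map.
  return transition_is_null ? ALLOW_ELEMENT_TRANSITION_MAPS : REQUIRE_EXACT_MAP;
}

int main() {
  assert(ModeFor(true) == ALLOW_ELEMENT_TRANSITION_MAPS);
  assert(ModeFor(false) == REQUIRE_EXACT_MAP);
}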
@@ -563,6 +565,8 @@ static void PushInterceptorArguments(MacroAssembler* masm,
   __ Push(scratch, receiver, holder);
   __ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
   __ push(scratch);
+  __ li(scratch, Operand(ExternalReference::isolate_address()));
+  __ push(scratch);
 }
 
 
@@ -577,7 +581,7 @@ static void CompileCallLoadPropertyWithInterceptor(
   ExternalReference ref =
       ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
           masm->isolate());
-  __ PrepareCEntryArgs(5);
+  __ PrepareCEntryArgs(6);
   __ PrepareCEntryFunction(ref);
 
   CEntryStub stub(1);
@@ -585,10 +589,10 @@ static void CompileCallLoadPropertyWithInterceptor(
 }
 
 
-static const int kFastApiCallArguments = 3;
+static const int kFastApiCallArguments = 4;
 
 
-// Reserves space for the extra arguments to FastHandleApiCall in the
+// Reserves space for the extra arguments to the API function in the
 // caller's frame.
 //
 // These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
@@ -614,7 +618,8 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
   //  -- sp[0]              : holder (set by CheckPrototypes)
   //  -- sp[4]              : callee JS function
   //  -- sp[8]              : call data
-  //  -- sp[12]             : last JS argument
+  //  -- sp[12]             : isolate
+  //  -- sp[16]             : last JS argument
   //  -- ...
   //  -- sp[(argc + 3) * 4] : first JS argument
   //  -- sp[(argc + 4) * 4] : receiver
@@ -624,7 +629,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
   __ LoadHeapObject(t1, function);
   __ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
 
-  // Pass the additional arguments FastHandleApiCall expects.
+  // Pass the additional arguments.
   Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
   Handle<Object> call_data(api_call_info->data());
   if (masm->isolate()->heap()->InNewSpace(*call_data)) {
@@ -634,14 +639,17 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
     __ li(t2, call_data);
   }
 
-  // Store JS function and call data.
+  __ li(t3, Operand(ExternalReference::isolate_address()));
+  // Store JS function, call data and isolate.
   __ sw(t1, MemOperand(sp, 1 * kPointerSize));
   __ sw(t2, MemOperand(sp, 2 * kPointerSize));
+  __ sw(t3, MemOperand(sp, 3 * kPointerSize));
 
-  // a2 points to call data as expected by Arguments
-  // (refer to layout above).
-  __ Addu(a2, sp, Operand(2 * kPointerSize));
+  // Prepare arguments.
+  __ Addu(a2, sp, Operand(3 * kPointerSize));
 
+  // Allocate the v8::Arguments structure in the arguments' space since
+  // it's not controlled by GC.
   const int kApiStackSpace = 4;
 
   FrameScope frame_scope(masm, StackFrame::MANUAL);
@@ -656,9 +664,9 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
   // Arguments is built at sp + 1 (sp is a reserved spot for ra).
   __ Addu(a1, sp, kPointerSize);
 
-  // v8::Arguments::implicit_args = data
+  // v8::Arguments::implicit_args_
   __ sw(a2, MemOperand(a1, 0 * kPointerSize));
-  // v8::Arguments::values = last argument
+  // v8::Arguments::values_
   __ Addu(t0, a2, Operand(argc * kPointerSize));
   __ sw(t0, MemOperand(a1, 1 * kPointerSize));
   // v8::Arguments::length_ = argc
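Adding the isolate grows the reserved area in front of the JS arguments from three slots to four, which is what the kFastApiCallArguments bump reflects; the interceptor paths in this file grow their runtime-call argument counts from 5 to 6 for the same reason. The reserved layout, spelled out as data (an illustration of the comment above, not the stub itself):

#include <cstdio>

int main() {
  const char* reserved[] = {
      "holder (set by CheckPrototypes)",  // sp[0]
      "callee JS function",               // sp[4]
      "call data",                        // sp[8]
      "isolate",                          // sp[12], the new fourth slot
      "last JS argument",                 // sp[16] and onwards
  };
  for (int i = 0; i < 5; i++) {
    std::printf("sp[%2d] : %s\n", 4 * i, reserved[i]);
  }
}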
@@ -836,7 +844,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
           ExternalReference(
               IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
               masm->isolate()),
-          5);
+          6);
     // Restore the name_ register.
     __ pop(name_);
     // Leave the internal frame.
@@ -1204,7 +1212,13 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
   } else {
     __ li(scratch3, Handle<Object>(callback->data()));
   }
-  __ Push(reg, scratch3, name_reg);
+  __ Subu(sp, sp, 4 * kPointerSize);
+  __ sw(reg, MemOperand(sp, 3 * kPointerSize));
+  __ sw(scratch3, MemOperand(sp, 2 * kPointerSize));
+  __ li(scratch3, Operand(ExternalReference::isolate_address()));
+  __ sw(scratch3, MemOperand(sp, 1 * kPointerSize));
+  __ sw(name_reg, MemOperand(sp, 0 * kPointerSize));
+
   __ mov(a2, scratch2);  // Saved in case scratch2 == a1.
   __ mov(a1, sp);  // a1 (first argument - see note below) = Handle<String>
 
@@ -1223,7 +1237,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
   // a2 (second argument - see note above) = AccessorInfo&
   __ Addu(a2, sp, kPointerSize);
 
-  const int kStackUnwindSpace = 4;
+  const int kStackUnwindSpace = 5;
   Address getter_address = v8::ToCData<Address>(callback->getter());
   ApiFunction fun(getter_address);
   ExternalReference ref =
@@ -1273,12 +1287,19 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
                                           name, miss);
     ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
 
+    // Preserve the receiver register explicitly whenever it is different from
+    // the holder and it is needed should the interceptor return without any
+    // result. The CALLBACKS case needs the receiver to be passed into C++
+    // code; the FIELD case might cause a miss during the prototype check.
+    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
+    bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
+        (lookup->type() == CALLBACKS || must_perform_prototype_check);
+
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
     {
       FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-        // CALLBACKS case needs a receiver to be passed into C++ callback.
+      if (must_preserve_receiver_reg) {
         __ Push(receiver, holder_reg, name_reg);
       } else {
         __ Push(holder_reg, name_reg);
@@ -1302,14 +1323,14 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
       __ bind(&interceptor_failed);
       __ pop(name_reg);
       __ pop(holder_reg);
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      if (must_preserve_receiver_reg) {
         __ pop(receiver);
       }
       // Leave the internal frame.
     }
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
-    if (*interceptor_holder != lookup->holder()) {
+    if (must_perform_prototype_check) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
                                    Handle<JSObject>(lookup->holder()),
@@ -1339,24 +1360,17 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
       // Important invariant in CALLBACKS case: the code above must be
       // structured to never clobber |receiver| register.
       __ li(scratch2, callback);
-      // holder_reg is either receiver or scratch1.
-      if (!receiver.is(holder_reg)) {
-        ASSERT(scratch1.is(holder_reg));
-        __ Push(receiver, holder_reg);
-        __ lw(scratch3,
-              FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
-        __ Push(scratch3, scratch2, name_reg);
-      } else {
-        __ push(receiver);
-        __ lw(scratch3,
-              FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
-        __ Push(holder_reg, scratch3, scratch2, name_reg);
-      }
+
+      __ Push(receiver, holder_reg);
+      __ lw(scratch3,
+            FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
+      __ li(scratch1, Operand(ExternalReference::isolate_address()));
+      __ Push(scratch3, scratch1, scratch2, name_reg);
 
       ExternalReference ref =
           ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
                             masm()->isolate());
-      __ TailCallExternalReference(ref, 5, 1);
+      __ TailCallExternalReference(ref, 6, 1);
     }
   } else {  // !compile_followup_inline
     // Call the runtime system to load the interceptor.
@@ -1369,7 +1383,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
 
     ExternalReference ref = ExternalReference(
         IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), masm()->isolate());
-    __ TailCallExternalReference(ref, 5, 1);
+    __ TailCallExternalReference(ref, 6, 1);
   }
 }
 
@@ -3370,6 +3384,45 @@ static bool IsElementTypeSigned(ElementsKind elements_kind) {
 }
 
 
+static void GenerateSmiKeyCheck(MacroAssembler* masm,
+                                Register key,
+                                Register scratch0,
+                                Register scratch1,
+                                FPURegister double_scratch0,
+                                Label* fail) {
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    Label key_ok;
+    // Check for smi or a smi inside a heap number.  We convert the heap
+    // number and check if the conversion is exact and fits into the smi
+    // range.
+    __ JumpIfSmi(key, &key_ok);
+    __ CheckMap(key,
+                scratch0,
+                Heap::kHeapNumberMapRootIndex,
+                fail,
+                DONT_DO_SMI_CHECK);
+    __ ldc1(double_scratch0, FieldMemOperand(key, HeapNumber::kValueOffset));
+    __ EmitFPUTruncate(kRoundToZero,
+                       double_scratch0,
+                       double_scratch0,
+                       scratch0,
+                       scratch1,
+                       kCheckForInexactConversion);
+
+    __ Branch(fail, ne, scratch1, Operand(zero_reg));
+
+    __ mfc1(scratch0, double_scratch0);
+    __ SmiTagCheckOverflow(key, scratch0, scratch1);
+    __ BranchOnOverflow(fail, scratch1);
+    __ bind(&key_ok);
+  } else {
+    // Check that the key is a smi.
+    __ JumpIfNotSmi(key, fail);
+  }
+}
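GenerateSmiKeyCheck is the new common gate for the element stubs below: when FPU support is available, a heap-number key is truncated toward zero and accepted only if the conversion is exact and the result smi-tags without overflow; anything else falls back to the generic path. The same acceptance test in standalone form (constants and names are mine; the real code works on tagged values and FPU registers):

#include <cmath>
#include <cstdio>
#include <stdint.h>

static const int32_t kSmiMax = (1 << 30) - 1;  // 31-bit smi payload, 32-bit VM
static const int32_t kSmiMin = -(1 << 30);

bool KeyToSmi(double key, int32_t* out) {
  double truncated = std::trunc(key);  // kRoundToZero
  if (truncated != key) return false;  // inexact conversion: miss
  if (truncated < kSmiMin || truncated > kSmiMax) return false;  // overflow
  *out = static_cast<int32_t>(truncated);
  return true;
}

int main() {
  int32_t smi;
  std::printf("%d\n", KeyToSmi(7.0, &smi));  // 1: a[7.0] stays on the fast path
  std::printf("%d\n", KeyToSmi(7.5, &smi));  // 0: miss_force_generic
}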
+
+
 void KeyedLoadStubCompiler::GenerateLoadExternalArray(
     MacroAssembler* masm,
     ElementsKind elements_kind) {
@@ -3386,8 +3439,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, key, t0, t1, f2, &miss_force_generic);
 
   __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
   // a3: elements array
@@ -3725,8 +3778,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-    // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, key, t0, t1, f2, &miss_force_generic);
 
   __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
 
@@ -4105,9 +4158,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(a0, &miss_force_generic, at, USE_DELAY_SLOT);
-  // The delay slot can be safely used here, a1 is an object pointer.
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, a0, t0, t1, f2, &miss_force_generic);
 
   // Get the elements array.
   __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
@@ -4157,8 +4209,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
 
   // Get the elements array.
   __ lw(elements_reg,
@@ -4231,8 +4283,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
 
   if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
     __ JumpIfNotSmi(value_reg, &transition_elements_kind);
@@ -4398,7 +4450,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
 
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
-  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
 
   __ lw(elements_reg,
          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
@@ -4490,6 +4544,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
     // Increment the length of the array.
     __ li(length_reg, Operand(Smi::FromInt(1)));
     __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ lw(elements_reg,
+          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
     __ jmp(&finish_store);
 
     __ bind(&check_capacity);
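The two added loads exist because the grow path just installed a fresh backing store: elements_reg still points at the old one, and the jump back to finish_store would otherwise write through the stale pointer. The bug class, reduced to ordinary C++:

#include <cassert>
#include <vector>

int main() {
  std::vector<double> elements(1, 1.0);
  double* cached = elements.data();  // like elements_reg before growing
  elements.resize(64);               // grow-on-store replaces the backing store
  cached = elements.data();          // the fix: reload after growing
  cached[1] = 2.0;
  assert(elements[1] == 2.0);
}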
index c43dd22..c7f0dcc 100644 (file)
@@ -596,6 +596,23 @@ ObjectMirror.prototype.protoObject = function() {
 };
 
 
+/**
+ * Return the primitive value if this is an object of Boolean, Number, or
+ * String type (but not Date). Otherwise return undefined.
+ */
+ObjectMirror.prototype.primitiveValue = function() {
+  if (!IS_STRING_WRAPPER(this.value_) && !IS_NUMBER_WRAPPER(this.value_) &&
+      !IS_BOOLEAN_WRAPPER(this.value_)) {
+    return void 0;
+  }
+  var primitiveValue = %_ValueOf(this.value_);
+  if (IS_UNDEFINED(primitiveValue)) {
+    return void 0;
+  }
+  return MakeMirror(primitiveValue);
+};
+
+
 ObjectMirror.prototype.hasNamedInterceptor = function() {
   // Get information on interceptors for this object.
   var x = %GetInterceptorInfo(this.value_);
@@ -896,6 +913,22 @@ FunctionMirror.prototype.constructedBy = function(opt_max_instances) {
 };
 
 
+FunctionMirror.prototype.scopeCount = function() {
+  if (this.resolved()) {
+    return %GetFunctionScopeCount(this.value());
+  } else {
+    return 0;
+  }
+};
+
+
+FunctionMirror.prototype.scope = function(index) {
+  if (this.resolved()) {
+    return new ScopeMirror(void 0, this, index);
+  }
+};
+
+
 FunctionMirror.prototype.toText = function() {
   return this.source();
 };
@@ -1572,7 +1605,7 @@ FrameMirror.prototype.scopeCount = function() {
 
 
 FrameMirror.prototype.scope = function(index) {
-  return new ScopeMirror(this, index);
+  return new ScopeMirror(this, void 0, index);
 };
 
 
@@ -1735,39 +1768,54 @@ FrameMirror.prototype.toText = function(opt_locals) {
 var kScopeDetailsTypeIndex = 0;
 var kScopeDetailsObjectIndex = 1;
 
-function ScopeDetails(frame, index) {
-  this.break_id_ = frame.break_id_;
-  this.details_ = %GetScopeDetails(frame.break_id_,
-                                   frame.details_.frameId(),
-                                   frame.details_.inlinedFrameIndex(),
-                                   index);
+function ScopeDetails(frame, fun, index) {
+  if (frame) {
+    this.break_id_ = frame.break_id_;
+    this.details_ = %GetScopeDetails(frame.break_id_,
+                                     frame.details_.frameId(),
+                                     frame.details_.inlinedFrameIndex(),
+                                     index);
+  } else {
+    this.details_ = %GetFunctionScopeDetails(fun.value(), index);
+    this.break_id_ = undefined;
+  }
 }
 
 
 ScopeDetails.prototype.type = function() {
-  %CheckExecutionState(this.break_id_);
+  if (!IS_UNDEFINED(this.break_id_)) {
+    %CheckExecutionState(this.break_id_);
+  }
   return this.details_[kScopeDetailsTypeIndex];
 };
 
 
 ScopeDetails.prototype.object = function() {
-  %CheckExecutionState(this.break_id_);
+  if (!IS_UNDEFINED(this.break_id_)) {
+    %CheckExecutionState(this.break_id_);
+  }
   return this.details_[kScopeDetailsObjectIndex];
 };
 
 
 /**
- * Mirror object for scope.
+ * Mirror object for scope of frame or function. Either frame or function must
+ * be specified.
  * @param {FrameMirror} frame The frame this scope is a part of
+ * @param {FunctionMirror} fun The function this scope is a part of
  * @param {number} index The scope index in the frame
  * @constructor
  * @extends Mirror
  */
-function ScopeMirror(frame, index) {
+function ScopeMirror(frame, fun, index) {
   %_CallFunction(this, SCOPE_TYPE, Mirror);
-  this.frame_index_ = frame.index_;
+  if (frame) {
+    this.frame_index_ = frame.index_;
+  } else {
+    this.frame_index_ = undefined;
+  }
   this.scope_index_ = index;
-  this.details_ = new ScopeDetails(frame, index);
+  this.details_ = new ScopeDetails(frame, fun, index);
 }
 inherits(ScopeMirror, Mirror);
 
@@ -2234,6 +2282,11 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
   content.protoObject = this.serializeReference(mirror.protoObject());
   content.prototypeObject = this.serializeReference(mirror.prototypeObject());
 
+  var primitiveValue = mirror.primitiveValue();
+  if (!IS_UNDEFINED(primitiveValue)) {
+    content.primitiveValue = this.serializeReference(primitiveValue);
+  }
+
   // Add flags to indicate whether there are interceptors.
   if (mirror.hasNamedInterceptor()) {
     content.namedInterceptor = true;
@@ -2259,6 +2312,15 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
 
       serializeLocationFields(mirror.sourceLocation(), content);
     }
+
+    content.scopes = [];
+    for (var i = 0; i < mirror.scopeCount(); i++) {
+      var scope = mirror.scope(i);
+      content.scopes.push({
+        type: scope.scopeType(),
+        index: i
+      });
+    }
   }
 
   // Add date specific properties.
index 8eefb23..9006abd 100644 (file)
@@ -135,6 +135,9 @@ void HeapObject::HeapObjectVerify() {
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
       JSObject::cast(this)->JSObjectVerify();
       break;
+    case JS_MODULE_TYPE:
+      JSModule::cast(this)->JSModuleVerify();
+      break;
     case JS_VALUE_TYPE:
       JSValue::cast(this)->JSValueVerify();
       break;
@@ -300,6 +303,8 @@ void Map::MapVerify() {
           instance_size() < HEAP->Capacity()));
   VerifyHeapPointer(prototype());
   VerifyHeapPointer(instance_descriptors());
+  SLOW_ASSERT(instance_descriptors()->IsSortedNoDuplicates());
+  SLOW_ASSERT(instance_descriptors()->IsConsistentWithBackPointers(this));
 }
 
 
@@ -366,6 +371,15 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
 }
 
 
+void JSModule::JSModuleVerify() {
+  Object* v = context();
+  if (v->IsHeapObject()) {
+    VerifyHeapPointer(v);
+  }
+  CHECK(v->IsUndefined() || v->IsModuleContext());
+}
+
+
 void JSValue::JSValueVerify() {
   Object* v = value();
   if (v->IsHeapObject()) {
@@ -882,6 +896,61 @@ bool DescriptorArray::IsSortedNoDuplicates() {
 }
 
 
+static bool CheckOneBackPointer(Map* current_map, Object* target) {
+  return !target->IsMap() || Map::cast(target)->GetBackPointer() == current_map;
+}
+
+
+bool DescriptorArray::IsConsistentWithBackPointers(Map* current_map) {
+  for (int i = 0; i < number_of_descriptors(); ++i) {
+    switch (GetType(i)) {
+      case MAP_TRANSITION:
+      case CONSTANT_TRANSITION:
+        if (!CheckOneBackPointer(current_map, GetValue(i))) {
+          return false;
+        }
+        break;
+      case ELEMENTS_TRANSITION: {
+        Object* object = GetValue(i);
+        if (!CheckOneBackPointer(current_map, object)) {
+          return false;
+        }
+        if (object->IsFixedArray()) {
+          FixedArray* array = FixedArray::cast(object);
+          for (int i = 0; i < array->length(); ++i) {
+            if (!CheckOneBackPointer(current_map, array->get(i))) {
+              return false;
+            }
+          }
+        }
+        break;
+      }
+      case CALLBACKS: {
+        Object* object = GetValue(i);
+        if (object->IsAccessorPair()) {
+          AccessorPair* accessors = AccessorPair::cast(object);
+          if (!CheckOneBackPointer(current_map, accessors->getter())) {
+            return false;
+          }
+          if (!CheckOneBackPointer(current_map, accessors->setter())) {
+            return false;
+          }
+        }
+        break;
+      }
+      case NORMAL:
+      case FIELD:
+      case CONSTANT_FUNCTION:
+      case HANDLER:
+      case INTERCEPTOR:
+      case NULL_DESCRIPTOR:
+        break;
+    }
+  }
+  return true;
+}
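The verifier encodes the new invariant: every map reachable through a transition recorded in the descriptor array, including each entry of an elements-transition array and both halves of an accessor pair, must name the current map as its back pointer. The invariant in miniature (a sketch, not the V8 type graph):

#include <cassert>
#include <cstddef>
#include <vector>

struct Map {
  Map* back_pointer;
  std::vector<Map*> transitions;
  Map() : back_pointer(0) {}
};

bool IsConsistentWithBackPointers(const Map* current_map) {
  for (std::size_t i = 0; i < current_map->transitions.size(); ++i) {
    if (current_map->transitions[i]->back_pointer != current_map) return false;
  }
  return true;
}

int main() {
  Map root, child;
  root.transitions.push_back(&child);
  child.back_pointer = &root;
  assert(IsConsistentWithBackPointers(&root));
}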
+
+
 void JSFunctionResultCache::JSFunctionResultCacheVerify() {
   JSFunction::cast(get(kFactoryIndex))->Verify();
 
@@ -923,6 +992,28 @@ void NormalizedMapCache::NormalizedMapCacheVerify() {
 }
 
 
+void Map::ZapInstanceDescriptors() {
+  DescriptorArray* descriptors = instance_descriptors();
+  if (descriptors == GetHeap()->empty_descriptor_array()) return;
+  FixedArray* contents = FixedArray::cast(
+      descriptors->get(DescriptorArray::kContentArrayIndex));
+  MemsetPointer(descriptors->data_start(),
+                GetHeap()->the_hole_value(),
+                descriptors->length());
+  MemsetPointer(contents->data_start(),
+                GetHeap()->the_hole_value(),
+                contents->length());
+}
+
+
+void Map::ZapPrototypeTransitions() {
+  FixedArray* proto_transitions = prototype_transitions();
+  MemsetPointer(proto_transitions->data_start(),
+                GetHeap()->the_hole_value(),
+                proto_transitions->length());
+}
+
+
 #endif  // DEBUG
 
 } }  // namespace v8::internal
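Both Zap helpers above overwrite a just-detached array with the hole sentinel so that, in debug builds, a stale reference fails recognizably instead of reading plausible-looking stale data. The pattern in miniature (a sketch, not V8 code):

#include <cstddef>

static int* const kTheHole = 0;  // stand-in for the heap's the-hole sentinel

void Zap(int** start, std::size_t count) {
  for (std::size_t i = 0; i < count; ++i) start[i] = kTheHole;
}

int main() {
  int a = 1, b = 2;
  int* detached[] = {&a, &b};
  Zap(detached, 2);  // after detaching: every slot now holds the sentinel
  return detached[0] == kTheHole ? 0 : 1;
}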
index f29cf6e..5444438 100644 (file)
@@ -581,7 +581,8 @@ bool Object::IsContext() {
             map == heap->catch_context_map() ||
             map == heap->with_context_map() ||
             map == heap->global_context_map() ||
-            map == heap->block_context_map());
+            map == heap->block_context_map() ||
+            map == heap->module_context_map());
   }
   return false;
 }
@@ -594,6 +595,13 @@ bool Object::IsGlobalContext() {
 }
 
 
+bool Object::IsModuleContext() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map() ==
+      HeapObject::cast(this)->GetHeap()->module_context_map();
+}
+
+
 bool Object::IsScopeInfo() {
   return Object::IsHeapObject() &&
       HeapObject::cast(this)->map() ==
@@ -613,6 +621,7 @@ TYPE_CHECKER(Code, CODE_TYPE)
 TYPE_CHECKER(Oddball, ODDBALL_TYPE)
 TYPE_CHECKER(JSGlobalPropertyCell, JS_GLOBAL_PROPERTY_CELL_TYPE)
 TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
+TYPE_CHECKER(JSModule, JS_MODULE_TYPE)
 TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
 TYPE_CHECKER(JSDate, JS_DATE_TYPE)
 TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
@@ -1383,7 +1392,9 @@ void JSObject::initialize_properties() {
 
 
 void JSObject::initialize_elements() {
-  ASSERT(map()->has_fast_elements() || map()->has_fast_smi_only_elements());
+  ASSERT(map()->has_fast_elements() ||
+         map()->has_fast_smi_only_elements() ||
+         map()->has_fast_double_elements());
   ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
   WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array());
 }
@@ -1436,6 +1447,8 @@ int JSObject::GetHeaderSize() {
   // field operations considerably on average.
   if (type == JS_OBJECT_TYPE) return JSObject::kHeaderSize;
   switch (type) {
+    case JS_MODULE_TYPE:
+      return JSModule::kSize;
     case JS_GLOBAL_PROXY_TYPE:
       return JSGlobalProxy::kSize;
     case JS_GLOBAL_OBJECT_TYPE:
@@ -1470,7 +1483,7 @@ int JSObject::GetInternalFieldCount() {
   // Make sure to adjust for the number of in-object properties. These
   // properties do contribute to the size, but are not internal fields.
   return ((Size() - GetHeaderSize()) >> kPointerSizeLog2) -
-         map()->inobject_properties() - (map()->has_external_resource()?1:0);
+         map()->inobject_properties();
 }
 
 
@@ -1510,23 +1523,6 @@ void JSObject::SetInternalField(int index, Smi* value) {
 }
 
 
-void JSObject::SetExternalResourceObject(Object *value) {
-  ASSERT(map()->has_external_resource());
-  int offset = GetHeaderSize() + kPointerSize * GetInternalFieldCount();
-  WRITE_FIELD(this, offset, value);
-  WRITE_BARRIER(GetHeap(), this, offset, value);
-}
-
-
-Object *JSObject::GetExternalResourceObject() {
-  if (map()->has_external_resource()) {
-    return READ_FIELD(this, GetHeaderSize() + kPointerSize * GetInternalFieldCount());
-  } else {
-    return GetHeap()->undefined_value();
-  }
-}
-
-
 // Access fast-case object properties at index. The use of these routines
 // is needed to correctly distinguish between properties stored in-object and
 // properties stored in the properties array.
@@ -1939,15 +1935,15 @@ Object* DescriptorArray::GetValue(int descriptor_number) {
 }
 
 
-Smi* DescriptorArray::GetDetails(int descriptor_number) {
+PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
   ASSERT(descriptor_number < number_of_descriptors());
-  return Smi::cast(GetContentArray()->get(ToDetailsIndex(descriptor_number)));
+  Object* details = GetContentArray()->get(ToDetailsIndex(descriptor_number));
+  return PropertyDetails(Smi::cast(details));
 }
 
 
 PropertyType DescriptorArray::GetType(int descriptor_number) {
-  ASSERT(descriptor_number < number_of_descriptors());
-  return PropertyDetails(GetDetails(descriptor_number)).type();
+  return GetDetails(descriptor_number).type();
 }
 
 
@@ -2010,15 +2006,10 @@ bool DescriptorArray::IsNullDescriptor(int descriptor_number) {
 }
 
 
-bool DescriptorArray::IsDontEnum(int descriptor_number) {
-  return PropertyDetails(GetDetails(descriptor_number)).IsDontEnum();
-}
-
-
 void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
   desc->Init(GetKey(descriptor_number),
              GetValue(descriptor_number),
-             PropertyDetails(GetDetails(descriptor_number)));
+             GetDetails(descriptor_number));
 }
 
 
@@ -2206,7 +2197,6 @@ SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
 SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
 
 SMI_ACCESSORS(String, length, kLengthOffset)
-SMI_ACCESSORS(SeqString, symbol_id, kSymbolIdOffset)
 
 
 uint32_t String::hash_field() {
@@ -2908,14 +2898,14 @@ bool Map::is_extensible() {
 
 void Map::set_attached_to_shared_function_info(bool value) {
   if (value) {
-    set_bit_field3(bit_field3() | (1 << kAttachedToSharedFunctionInfo));
+    set_bit_field2(bit_field2() | (1 << kAttachedToSharedFunctionInfo));
   } else {
-    set_bit_field3(bit_field3() & ~(1 << kAttachedToSharedFunctionInfo));
+    set_bit_field2(bit_field2() & ~(1 << kAttachedToSharedFunctionInfo));
   }
 }
 
 bool Map::attached_to_shared_function_info() {
-  return ((1 << kAttachedToSharedFunctionInfo) & bit_field3()) != 0;
+  return ((1 << kAttachedToSharedFunctionInfo) & bit_field2()) != 0;
 }
 
 
@@ -2931,46 +2921,6 @@ bool Map::is_shared() {
   return ((1 << kIsShared) & bit_field3()) != 0;
 }
 
-void Map::set_has_external_resource(bool value) {
-  if (value) {
-    set_bit_field(bit_field() | (1 << kHasExternalResource));
-  } else {
-    set_bit_field(bit_field() & ~(1 << kHasExternalResource));
-  }
-}
-
-bool Map::has_external_resource()
-{
-    return ((1 << kHasExternalResource) & bit_field()) != 0;
-}
-
-
-void Map::set_use_user_object_comparison(bool value) {
-  if (value) {
-    set_bit_field2(bit_field2() | (1 << kUseUserObjectComparison));
-  } else {
-    set_bit_field2(bit_field2() & ~(1 << kUseUserObjectComparison));
-  }
-}
-
-
-bool Map::use_user_object_comparison() {
-    return ((1 << kUseUserObjectComparison) & bit_field2()) != 0;
-}
-
-
-void Map::set_named_interceptor_is_fallback(bool value) {
-  if (value) {
-    set_bit_field3(bit_field3() | (1 << kNamedInterceptorIsFallback));
-  } else {
-    set_bit_field3(bit_field3() & ~(1 << kNamedInterceptorIsFallback));
-  }
-}
-
-bool Map::named_interceptor_is_fallback() {
-  return ((1 << kNamedInterceptorIsFallback) & bit_field3()) != 0;
-}
-
 
 JSFunction* Map::unchecked_constructor() {
   return reinterpret_cast<JSFunction*>(READ_FIELD(this, kConstructorOffset));
@@ -3243,6 +3193,18 @@ void Code::set_compare_state(byte value) {
 }
 
 
+byte Code::compare_operation() {
+  ASSERT(is_compare_ic_stub());
+  return READ_BYTE_FIELD(this, kCompareOperationOffset);
+}
+
+
+void Code::set_compare_operation(byte value) {
+  ASSERT(is_compare_ic_stub());
+  WRITE_BYTE_FIELD(this, kCompareOperationOffset, value);
+}
+
+
 byte Code::to_boolean_state() {
   ASSERT(is_to_boolean_ic_stub());
   return READ_BYTE_FIELD(this, kToBooleanTypeOffset);
@@ -3389,6 +3351,9 @@ void Map::clear_instance_descriptors() {
   Object* object = READ_FIELD(this,
                               kInstanceDescriptorsOrBitField3Offset);
   if (!object->IsSmi()) {
+#ifdef DEBUG
+    ZapInstanceDescriptors();
+#endif
     WRITE_FIELD(
         this,
         kInstanceDescriptorsOrBitField3Offset,
@@ -3414,6 +3379,11 @@ void Map::set_instance_descriptors(DescriptorArray* value,
     }
   }
   ASSERT(!is_shared());
+#ifdef DEBUG
+  if (value != instance_descriptors()) {
+    ZapInstanceDescriptors();
+  }
+#endif
   WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, value);
   CONDITIONAL_WRITE_BARRIER(
       heap, this, kInstanceDescriptorsOrBitField3Offset, value, mode);
@@ -3445,14 +3415,71 @@ void Map::set_bit_field3(int value) {
 }
 
 
-FixedArray* Map::unchecked_prototype_transitions() {
-  return reinterpret_cast<FixedArray*>(
-      READ_FIELD(this, kPrototypeTransitionsOffset));
+Object* Map::GetBackPointer() {
+  Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
+  if (object->IsFixedArray()) {
+    return FixedArray::cast(object)->get(kProtoTransitionBackPointerOffset);
+  } else {
+    return object;
+  }
+}
+
+
+void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
+  Heap* heap = GetHeap();
+  ASSERT(instance_type() >= FIRST_JS_RECEIVER_TYPE);
+  ASSERT((value->IsUndefined() && GetBackPointer()->IsMap()) ||
+         (value->IsMap() && GetBackPointer()->IsUndefined()));
+  Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
+  if (object->IsFixedArray()) {
+    FixedArray::cast(object)->set(
+        kProtoTransitionBackPointerOffset, value, mode);
+  } else {
+    WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, value);
+    CONDITIONAL_WRITE_BARRIER(
+        heap, this, kPrototypeTransitionsOrBackPointerOffset, value, mode);
+  }
+}
+
+
+FixedArray* Map::prototype_transitions() {
+  Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
+  if (object->IsFixedArray()) {
+    return FixedArray::cast(object);
+  } else {
+    return GetHeap()->empty_fixed_array();
+  }
+}
+
+
+void Map::set_prototype_transitions(FixedArray* value, WriteBarrierMode mode) {
+  Heap* heap = GetHeap();
+  ASSERT(value != heap->empty_fixed_array());
+  value->set(kProtoTransitionBackPointerOffset, GetBackPointer());
+#ifdef DEBUG
+  if (value != prototype_transitions()) {
+    ZapPrototypeTransitions();
+  }
+#endif
+  WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, value);
+  CONDITIONAL_WRITE_BARRIER(
+      heap, this, kPrototypeTransitionsOrBackPointerOffset, value, mode);
+}
+
+
+void Map::init_prototype_transitions(Object* undefined) {
+  ASSERT(undefined->IsUndefined());
+  WRITE_FIELD(this, kPrototypeTransitionsOrBackPointerOffset, undefined);
+}
+
+
+HeapObject* Map::unchecked_prototype_transitions() {
+  Object* object = READ_FIELD(this, kPrototypeTransitionsOrBackPointerOffset);
+  return reinterpret_cast<HeapObject*>(object);
 }
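This block is the heart of the change: the former prototype-transitions field now does double duty. It stores the back pointer directly until prototype transitions exist, at which point the slot is upgraded to a FixedArray whose reserved first element carries the back pointer onward; readers disambiguate with IsFixedArray(). A compact C++17 model of the overlay (names mine, not V8 code):

#include <cassert>
#include <variant>
#include <vector>

struct Map;

struct ProtoTransitions {
  Map* back_pointer;  // the kProtoTransitionBackPointerOffset slot
  std::vector<Map*> entries;
};

struct Map {
  // One overlaid slot: either the back pointer itself or the transitions
  // array; the real code tells the two apart with IsFixedArray().
  std::variant<Map*, ProtoTransitions*> slot{static_cast<Map*>(nullptr)};

  Map* GetBackPointer() {
    if (ProtoTransitions** arr = std::get_if<ProtoTransitions*>(&slot)) {
      return (*arr)->back_pointer;
    }
    return std::get<Map*>(slot);
  }

  void SetPrototypeTransitions(ProtoTransitions* arr) {
    arr->back_pointer = GetBackPointer();  // migrate the cached back pointer
    slot = arr;
  }
};

int main() {
  Map parent, child;
  child.slot = &parent;  // plain back-pointer form
  ProtoTransitions transitions;
  child.SetPrototypeTransitions(&transitions);  // upgrade to array form
  assert(child.GetBackPointer() == &parent);
}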
 
 
 ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
-ACCESSORS(Map, prototype_transitions, FixedArray, kPrototypeTransitionsOffset)
 ACCESSORS(Map, constructor, Object, kConstructorOffset)
 
 ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
@@ -3487,7 +3514,6 @@ ACCESSORS(InterceptorInfo, query, Object, kQueryOffset)
 ACCESSORS(InterceptorInfo, deleter, Object, kDeleterOffset)
 ACCESSORS(InterceptorInfo, enumerator, Object, kEnumeratorOffset)
 ACCESSORS(InterceptorInfo, data, Object, kDataOffset)
-ACCESSORS(InterceptorInfo, is_fallback, Smi, kFallbackOffset)
 
 ACCESSORS(CallHandlerInfo, callback, Object, kCallbackOffset)
 ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
@@ -3519,10 +3545,6 @@ ACCESSORS_TO_SMI(FunctionTemplateInfo, flag, kFlagOffset)
 ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
 ACCESSORS(ObjectTemplateInfo, internal_field_count, Object,
           kInternalFieldCountOffset)
-ACCESSORS(ObjectTemplateInfo, has_external_resource, Object,
-          kHasExternalResourceOffset)
-ACCESSORS(ObjectTemplateInfo, use_user_object_comparison, Object, 
-          kUseUserObjectComparisonOffset)
 
 ACCESSORS(SignatureInfo, receiver, Object, kReceiverOffset)
 ACCESSORS(SignatureInfo, args, Object, kArgsOffset)
@@ -3715,6 +3737,12 @@ void SharedFunctionInfo::set_optimization_disabled(bool disable) {
 }
 
 
+int SharedFunctionInfo::profiler_ticks() {
+  if (code()->kind() != Code::FUNCTION) return 0;
+  return code()->profiler_ticks();
+}
+
+
 LanguageMode SharedFunctionInfo::language_mode() {
   int hints = compiler_hints();
   if (BooleanBit::get(hints, kExtendedModeFunction)) {
@@ -3748,8 +3776,6 @@ bool SharedFunctionInfo::is_classic_mode() {
 
 BOOL_GETTER(SharedFunctionInfo, compiler_hints, is_extended_mode,
             kExtendedModeFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, qml_mode,
-               kQmlModeFunction)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
                name_should_print_as_anonymous,
@@ -4143,6 +4169,16 @@ void Foreign::set_foreign_address(Address value) {
 }
 
 
+ACCESSORS(JSModule, context, Object, kContextOffset)
+
+
+JSModule* JSModule::cast(Object* obj) {
+  ASSERT(obj->IsJSModule());
+  ASSERT(HeapObject::cast(obj)->Size() == JSModule::kSize);
+  return reinterpret_cast<JSModule*>(obj);
+}
+
+
 ACCESSORS(JSValue, value, Object, kValueOffset)
 
 
index 2353a95..febdaab 100644 (file)
@@ -135,6 +135,9 @@ void HeapObject::HeapObjectPrint(FILE* out) {
     case ODDBALL_TYPE:
       Oddball::cast(this)->to_string()->Print(out);
       break;
+    case JS_MODULE_TYPE:
+      JSModule::cast(this)->JSModulePrint(out);
+      break;
     case JS_FUNCTION_TYPE:
       JSFunction::cast(this)->JSFunctionPrint(out);
       break;
@@ -152,7 +155,7 @@ void HeapObject::HeapObjectPrint(FILE* out) {
       JSValue::cast(this)->value()->Print(out);
       break;
     case JS_DATE_TYPE:
-      JSDate::cast(this)->value()->Print(out);
+      JSDate::cast(this)->JSDatePrint(out);
       break;
     case CODE_TYPE:
       Code::cast(this)->CodePrint(out);
@@ -328,14 +331,16 @@ void JSObject::PrintElements(FILE* out) {
     }
     case FAST_DOUBLE_ELEMENTS: {
       // Print in array notation for non-sparse arrays.
-      FixedDoubleArray* p = FixedDoubleArray::cast(elements());
-      for (int i = 0; i < p->length(); i++) {
-        if (p->is_the_hole(i)) {
-          PrintF(out, "   %d: <the hole>", i);
-        } else {
-          PrintF(out, "   %d: %g", i, p->get_scalar(i));
+      if (elements()->length() > 0) {
+        FixedDoubleArray* p = FixedDoubleArray::cast(elements());
+        for (int i = 0; i < p->length(); i++) {
+          if (p->is_the_hole(i)) {
+            PrintF(out, "   %d: <the hole>", i);
+          } else {
+            PrintF(out, "   %d: %g", i, p->get_scalar(i));
+          }
+          PrintF(out, "\n");
         }
-        PrintF(out, "\n");
       }
       break;
     }
@@ -437,6 +442,19 @@ void JSObject::JSObjectPrint(FILE* out) {
 }
 
 
+void JSModule::JSModulePrint(FILE* out) {
+  HeapObject::PrintHeader(out, "JSModule");
+  PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+  PrintF(out, " - context = ");
+  context()->Print(out);
+  PrintElementsKind(out, this->map()->elements_kind());
+  PrintF(out, " {\n");
+  PrintProperties(out);
+  PrintElements(out);
+  PrintF(out, " }\n");
+}
+
+
 static const char* TypeToString(InstanceType type) {
   switch (type) {
     case INVALID_TYPE: return "INVALID";
@@ -483,6 +501,7 @@ static const char* TypeToString(InstanceType type) {
     case ODDBALL_TYPE: return "ODDBALL";
     case JS_GLOBAL_PROPERTY_CELL_TYPE: return "JS_GLOBAL_PROPERTY_CELL";
     case SHARED_FUNCTION_INFO_TYPE: return "SHARED_FUNCTION_INFO";
+    case JS_MODULE_TYPE: return "JS_MODULE";
     case JS_FUNCTION_TYPE: return "JS_FUNCTION";
     case CODE_TYPE: return "CODE";
     case JS_ARRAY_TYPE: return "JS_ARRAY";
index 627d1bc..8ba92f7 100644 (file)
@@ -72,9 +72,7 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
 
   table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
 
-  table_.Register(kVisitJSFunction,
-                  &JSObjectVisitor::
-                      template VisitSpecialized<JSFunction::kSize>);
+  table_.Register(kVisitJSFunction, &VisitJSFunction);
 
   table_.Register(kVisitFreeSpace, &VisitFreeSpace);
 
index c7c8a87..a2dc43e 100644 (file)
@@ -133,6 +133,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
 
     case JS_OBJECT_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+    case JS_MODULE_TYPE:
     case JS_VALUE_TYPE:
     case JS_DATE_TYPE:
     case JS_ARRAY_TYPE:
index 26e79ae..b476dfe 100644
@@ -289,6 +289,23 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
   }
 
  private:
+  static inline int VisitJSFunction(Map* map, HeapObject* object) {
+    Heap* heap = map->GetHeap();
+    VisitPointers(heap,
+                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
+                  HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
+
+    // Don't visit code entry. We are using this visitor only during scavenges.
+
+    VisitPointers(
+        heap,
+        HeapObject::RawField(object,
+                             JSFunction::kCodeEntryOffset + kPointerSize),
+        HeapObject::RawField(object,
+                             JSFunction::kNonWeakFieldsEndOffset));
+    return JSFunction::kSize;
+  }
+
   static inline int VisitByteArray(Map* map, HeapObject* object) {
     return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
   }
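
The specialized VisitJSFunction above walks the function's tagged fields in two ranges and skips the slot in between: during a scavenge the code entry holds a raw machine address, not a tagged pointer, so the generic pointer visitor must not interpret it. A standalone sketch of the split-range pattern (word offsets and the visitor are hypothetical):

    #include <cstdio>

    // Hypothetical word offsets within a function-like object.
    enum {
      kPropertiesOffset = 0,
      kCodeEntryOffset  = 3,   // raw machine address: must not be visited
      kNonWeakFieldsEnd = 6
    };

    // Visit every tagged slot in [start, end).
    void VisitPointers(void** fields, int start, int end) {
      for (int i = start; i < end; ++i)
        std::printf("visiting tagged slot %d -> %p\n", i, fields[i]);
    }

    int main() {
      void* fields[kNonWeakFieldsEnd] = {};  // stand-in object body
      // Two ranges around the code entry, mirroring the visitor above:
      VisitPointers(fields, kPropertiesOffset, kCodeEntryOffset);
      VisitPointers(fields, kCodeEntryOffset + 1, kNonWeakFieldsEnd);
      return 0;
    }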
index f7f2879..cb87c71 100644
@@ -1338,6 +1338,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
       break;
     case JS_OBJECT_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+    case JS_MODULE_TYPE:
     case JS_VALUE_TYPE:
     case JS_DATE_TYPE:
     case JS_ARRAY_TYPE:
@@ -1603,6 +1604,7 @@ MaybeObject* JSObject::AddFastProperty(String* name,
   // We have now allocated all the necessary objects.
   // All the changes can be applied at once, so they are atomic.
   map()->set_instance_descriptors(old_descriptors);
+  new_map->SetBackPointer(map());
   new_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
   set_map(new_map);
   return FastPropertyAtPut(index, value);
@@ -1663,6 +1665,7 @@ MaybeObject* JSObject::AddConstantFunctionProperty(
     }
   }
   old_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+  Map::cast(new_map)->SetBackPointer(old_map);
 
   return function;
 }
@@ -1823,6 +1826,7 @@ MaybeObject* JSObject::ConvertDescriptorToFieldAndMapTransition(
     }
   }
   old_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+  map()->SetBackPointer(old_map);
   return result;
 }
 
@@ -1936,11 +1940,9 @@ Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
                                        Handle<String> key,
                                        Handle<Object> value,
                                        PropertyAttributes attributes,
-                                       StrictModeFlag strict_mode,
-                                       bool skip_fallback_interceptor) {
+                                       StrictModeFlag strict_mode) {
   CALL_HEAP_FUNCTION(object->GetIsolate(),
-                     object->SetProperty(*key, *value, attributes, strict_mode,
-                                         skip_fallback_interceptor),
+                     object->SetProperty(*key, *value, attributes, strict_mode),
                      Object);
 }
 
@@ -1948,10 +1950,9 @@ Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
 MaybeObject* JSReceiver::SetProperty(String* name,
                                      Object* value,
                                      PropertyAttributes attributes,
-                                     StrictModeFlag strict_mode,
-                                     bool skip_fallback_interceptor) {
+                                     StrictModeFlag strict_mode) {
   LookupResult result(GetIsolate());
-  LocalLookup(name, &result, skip_fallback_interceptor);
+  LocalLookup(name, &result);
   return SetProperty(&result, name, value, attributes, strict_mode);
 }
 
@@ -2324,7 +2325,7 @@ Object* Map::GetDescriptorContents(String* sentinel_name,
   }
   // If the transition already exists, return its descriptor.
   if (index != DescriptorArray::kNotFound) {
-    PropertyDetails details(descriptors->GetDetails(index));
+    PropertyDetails details = descriptors->GetDetails(index);
     if (details.type() == ELEMENTS_TRANSITION) {
       return descriptors->GetValue(index);
     } else {
@@ -2410,6 +2411,7 @@ MaybeObject* Map::AddElementsTransition(ElementsKind elements_kind,
     return maybe_new_descriptors;
   }
   set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+  transitioned_map->SetBackPointer(this);
   return this;
 }
 
@@ -3028,7 +3030,6 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
     String* name,
     Object* value,
     PropertyAttributes attributes) {
-
   // Make sure that the top context does not change when doing callbacks or
   // interceptor calls.
   AssertNoContextChange ncc;
@@ -3097,7 +3098,6 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
       return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
     case HANDLER:
       UNREACHABLE();
-      return value;
   }
   UNREACHABLE();  // keep the compiler happy
   return value;
@@ -3348,7 +3348,7 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
 
   DescriptorArray* descs = map_of_this->instance_descriptors();
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
-    PropertyDetails details(descs->GetDetails(i));
+    PropertyDetails details = descs->GetDetails(i);
     switch (details.type()) {
       case CONSTANT_FUNCTION: {
         PropertyDetails d =
@@ -4210,7 +4210,7 @@ int Map::NumberOfDescribedProperties(PropertyAttributes filter) {
   int result = 0;
   DescriptorArray* descs = instance_descriptors();
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
-    PropertyDetails details(descs->GetDetails(i));
+    PropertyDetails details = descs->GetDetails(i);
     if (descs->IsProperty(i) && (details.attributes() & filter) == 0) {
       result++;
     }
@@ -4254,8 +4254,7 @@ AccessorDescriptor* Map::FindAccessor(String* name) {
 }
 
 
-void JSReceiver::LocalLookup(String* name, LookupResult* result,
-                             bool skip_fallback_interceptor) {
+void JSReceiver::LocalLookup(String* name, LookupResult* result) {
   ASSERT(name->IsString());
 
   Heap* heap = GetHeap();
@@ -4287,33 +4286,23 @@ void JSReceiver::LocalLookup(String* name, LookupResult* result,
   }
 
   // Check for lookup interceptor except when bootstrapping.
-  bool wouldIntercept = js_object->HasNamedInterceptor() &&
-                        !heap->isolate()->bootstrapper()->IsActive();
-  if (wouldIntercept && !map()->named_interceptor_is_fallback()) {
+  if (js_object->HasNamedInterceptor() &&
+      !heap->isolate()->bootstrapper()->IsActive()) {
     result->InterceptorResult(js_object);
     return;
   }
 
   js_object->LocalLookupRealNamedProperty(name, result);
-
-  if (wouldIntercept && !skip_fallback_interceptor && !result->IsProperty() &&
-      map()->named_interceptor_is_fallback()) {
-    result->InterceptorResult(js_object);
-    return;
-  }
 }
 
 
-void JSReceiver::Lookup(String* name, LookupResult* result,
-                        bool skip_fallback_interceptor) {
+void JSReceiver::Lookup(String* name, LookupResult* result) {
   // Ecma-262 3rd 8.6.2.4
   Heap* heap = GetHeap();
   for (Object* current = this;
        current != heap->null_value();
        current = JSObject::cast(current)->GetPrototype()) {
-    JSReceiver::cast(current)->LocalLookup(name,
-                                           result,
-                                           skip_fallback_interceptor);
+    JSReceiver::cast(current)->LocalLookup(name, result);
     if (result->IsProperty()) return;
   }
   result->NotFound();
@@ -4424,37 +4413,29 @@ MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
 }
 
 
-MaybeObject* JSObject::DefinePropertyAccessor(String* name,
-                                              Object* getter,
-                                              Object* setter,
-                                              PropertyAttributes attributes) {
-  // Lookup the name.
+MaybeObject* JSObject::CreateAccessorPairFor(String* name) {
   LookupResult result(GetHeap()->isolate());
   LocalLookupRealNamedProperty(name, &result);
-  if (result.IsFound()) {
-    if (result.type() == CALLBACKS) {
-      ASSERT(!result.IsDontDelete());
-      Object* obj = result.GetCallbackObject();
-      // Need to preserve old getters/setters.
-      if (obj->IsAccessorPair()) {
-        AccessorPair* copy;
-        { MaybeObject* maybe_copy =
-              AccessorPair::cast(obj)->CopyWithoutTransitions();
-          if (!maybe_copy->To(&copy)) return maybe_copy;
-        }
-        copy->SetComponents(getter, setter);
-        // Use set to update attributes.
-        return SetPropertyCallback(name, copy, attributes);
-      }
+  if (result.IsProperty() && result.type() == CALLBACKS) {
+    ASSERT(!result.IsDontDelete());
+    Object* obj = result.GetCallbackObject();
+    if (obj->IsAccessorPair()) {
+      return AccessorPair::cast(obj)->CopyWithoutTransitions();
     }
   }
+  return GetHeap()->AllocateAccessorPair();
+}
 
+
+MaybeObject* JSObject::DefinePropertyAccessor(String* name,
+                                              Object* getter,
+                                              Object* setter,
+                                              PropertyAttributes attributes) {
   AccessorPair* accessors;
-  { MaybeObject* maybe_accessors = GetHeap()->AllocateAccessorPair();
+  { MaybeObject* maybe_accessors = CreateAccessorPairFor(name);
     if (!maybe_accessors->To(&accessors)) return maybe_accessors;
   }
   accessors->SetComponents(getter, setter);
-
   return SetPropertyCallback(name, accessors, attributes);
 }
 
@@ -4982,7 +4963,7 @@ class IntrusiveMapTransitionIterator {
 // underlying array while it is running.
 class IntrusivePrototypeTransitionIterator {
  public:
-  explicit IntrusivePrototypeTransitionIterator(FixedArray* proto_trans)
+  explicit IntrusivePrototypeTransitionIterator(HeapObject* proto_trans)
       : proto_trans_(proto_trans) { }
 
   void Start() {
@@ -5007,7 +4988,7 @@ class IntrusivePrototypeTransitionIterator {
 
  private:
   bool HasTransitions() {
-    return proto_trans_->length() >= Map::kProtoTransitionHeaderSize;
+    return proto_trans_->map()->IsSmi() || proto_trans_->IsFixedArray();
   }
 
   Object** Header() {
@@ -5015,12 +4996,16 @@ class IntrusivePrototypeTransitionIterator {
   }
 
   int NumberOfTransitions() {
-    Object* num = proto_trans_->get(Map::kProtoTransitionNumberOfEntriesOffset);
+    ASSERT(HasTransitions());
+    FixedArray* proto_trans = reinterpret_cast<FixedArray*>(proto_trans_);
+    Object* num = proto_trans->get(Map::kProtoTransitionNumberOfEntriesOffset);
     return Smi::cast(num)->value();
   }
 
   Map* GetTransition(int transitionNumber) {
-    return Map::cast(proto_trans_->get(IndexFor(transitionNumber)));
+    ASSERT(HasTransitions());
+    FixedArray* proto_trans = reinterpret_cast<FixedArray*>(proto_trans_);
+    return Map::cast(proto_trans->get(IndexFor(transitionNumber)));
   }
 
   int IndexFor(int transitionNumber) {
@@ -5029,7 +5014,7 @@ class IntrusivePrototypeTransitionIterator {
         transitionNumber * Map::kProtoTransitionElementsPerEntry;
   }
 
-  FixedArray* proto_trans_;
+  HeapObject* proto_trans_;
 };
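
Because the back pointer now overlaps the prototype-transitions slot, the iterator can no longer assume it is handed a FixedArray; HasTransitions() discriminates on the object itself, and the map()->IsSmi() half covers the intrusive-iteration trick of temporarily overwriting the map word with a Smi-encoded cursor. A sketch of that style of tag-based discrimination, assuming a hypothetical one-bit tagging scheme:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical tagging: even word = Smi (small integer), odd = heap pointer.
    inline bool IsSmi(uintptr_t word) { return (word & 1) == 0; }
    inline uintptr_t SmiFromInt(intptr_t v) { return static_cast<uintptr_t>(v) << 1; }
    inline intptr_t SmiToInt(uintptr_t word) { return static_cast<intptr_t>(word >> 1); }

    int main() {
      // An iteration cursor stored where the map pointer normally lives.
      uintptr_t map_word = SmiFromInt(3);
      if (IsSmi(map_word))
        std::printf("iteration in progress, cursor = %ld\n",
                    static_cast<long>(SmiToInt(map_word)));
      return 0;
    }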
 
 
@@ -5710,7 +5695,7 @@ MaybeObject* DescriptorArray::CopyFrom(int dst_index,
                                        int src_index,
                                        const WhitenessWitness& witness) {
   Object* value = src->GetValue(src_index);
-  PropertyDetails details(src->GetDetails(src_index));
+  PropertyDetails details = src->GetDetails(src_index);
   if (details.type() == CALLBACKS && value->IsAccessorPair()) {
     MaybeObject* maybe_copy =
         AccessorPair::cast(value)->CopyWithoutTransitions();
@@ -5753,7 +5738,7 @@ MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
   if (replacing) {
     // We are replacing an existing descriptor.  We keep the enumeration
     // index of a visible property.
-    PropertyType t = PropertyDetails(GetDetails(index)).type();
+    PropertyType t = GetDetails(index).type();
     if (t == CONSTANT_FUNCTION ||
         t == FIELD ||
         t == CALLBACKS ||
@@ -5780,8 +5765,7 @@ MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
   int enumeration_index = NextEnumerationIndex();
   if (!descriptor->ContainsTransition()) {
     if (keep_enumeration_index) {
-      descriptor->SetEnumerationIndex(
-          PropertyDetails(GetDetails(index)).index());
+      descriptor->SetEnumerationIndex(GetDetails(index).index());
     } else {
       descriptor->SetEnumerationIndex(enumeration_index);
       ++enumeration_index;
@@ -5925,10 +5909,10 @@ int DescriptorArray::BinarySearch(String* name, int low, int high) {
     ASSERT(hash == mid_hash);
     // There might be more, so we find the first one and
     // check them all to see if we have a match.
-    if (name == mid_name  && !is_null_descriptor(mid)) return mid;
+    if (name == mid_name  && !IsNullDescriptor(mid)) return mid;
     while ((mid > low) && (GetKey(mid - 1)->Hash() == hash)) mid--;
     for (; (mid <= high) && (GetKey(mid)->Hash() == hash); mid++) {
-      if (GetKey(mid)->Equals(name) && !is_null_descriptor(mid)) return mid;
+      if (GetKey(mid)->Equals(name) && !IsNullDescriptor(mid)) return mid;
     }
     break;
   }
@@ -5942,7 +5926,7 @@ int DescriptorArray::LinearSearch(String* name, int len) {
     String* entry = GetKey(number);
     if ((entry->Hash() == hash) &&
         name->Equals(entry) &&
-        !is_null_descriptor(number)) {
+        !IsNullDescriptor(number)) {
       return number;
     }
   }
@@ -6870,72 +6854,6 @@ static inline bool CompareStringContentsPartial(Isolate* isolate,
 }
 
 
-bool String::SlowEqualsExternal(uc16 *string, int length) {
-  int len = this->length();
-  if (len != length) return false;
-  if (len == 0) return true;
-
-  // We know the strings are both non-empty. Compare the first chars
-  // before we try to flatten the strings.
-  if (this->Get(0) != string[0]) return false;
-
-  String* lhs = this->TryFlattenGetString();
-
-  if (lhs->IsFlat()) {
-    String::FlatContent lhs_content = lhs->GetFlatContent();
-    if (lhs->IsAsciiRepresentation()) {
-      Vector<const char> vec1 = lhs_content.ToAsciiVector();
-      VectorIterator<char> buf1(vec1);
-      VectorIterator<uc16> ib(string, length);
-      return CompareStringContents(&buf1, &ib);
-    } else {
-      Vector<const uc16> vec1 = lhs_content.ToUC16Vector();
-      Vector<const uc16> vec2(string, length);
-      return CompareRawStringContents(vec1, vec2);
-    }
-  } else {
-    Isolate* isolate = GetIsolate();
-    isolate->objects_string_compare_buffer_a()->Reset(0, lhs);
-    VectorIterator<uc16> ib(string, length);
-    return CompareStringContents(isolate->objects_string_compare_buffer_a(),
-                                 &ib);
-  }
-}
-
-
-bool String::SlowEqualsExternal(char *string, int length) {
-  int len = this->length();
-  if (len != length) return false;
-  if (len == 0) return true;
-
-  // We know the strings are both non-empty. Compare the first chars
-  // before we try to flatten the strings.
-  if (this->Get(0) != string[0]) return false;
-
-  String* lhs = this->TryFlattenGetString();
-
-  if (StringShape(lhs).IsSequentialAscii()) {
-      const char* str1 = SeqAsciiString::cast(lhs)->GetChars();
-      return CompareRawStringContents(Vector<const char>(str1, len),
-                                      Vector<const char>(string, len));
-  }
-
-  if (lhs->IsFlat()) {
-    String::FlatContent lhs_content = lhs->GetFlatContent();
-    Vector<const uc16> vec1 = lhs_content.ToUC16Vector();
-    VectorIterator<const uc16> buf1(vec1);
-    VectorIterator<char> buf2(string, length);
-    return CompareStringContents(&buf1, &buf2);
-  } else {
-    Isolate* isolate = GetIsolate();
-    isolate->objects_string_compare_buffer_a()->Reset(0, lhs);
-    VectorIterator<char> ib(string, length);
-    return CompareStringContents(isolate->objects_string_compare_buffer_a(),
-                                 &ib);
-  }
-}
-
-
 bool String::SlowEquals(String* other) {
   // Fast check: negative check with lengths.
   int len = length();
@@ -7258,85 +7176,23 @@ void String::PrintOn(FILE* file) {
 }
 
 
-void Map::CreateOneBackPointer(Object* transition_target) {
-  if (!transition_target->IsMap()) return;
-  Map* target = Map::cast(transition_target);
-#ifdef DEBUG
-  // Verify target.
-  Object* source_prototype = prototype();
-  Object* target_prototype = target->prototype();
-  ASSERT(source_prototype->IsJSReceiver() ||
-         source_prototype->IsMap() ||
-         source_prototype->IsNull());
-  ASSERT(target_prototype->IsJSReceiver() ||
-         target_prototype->IsNull());
-  ASSERT(source_prototype->IsMap() ||
-         source_prototype == target_prototype);
-#endif
-  // Point target back to source.  set_prototype() will not let us set
-  // the prototype to a map, as we do here.
-  *RawField(target, kPrototypeOffset) = this;
-}
-
-
-void Map::CreateBackPointers() {
-  DescriptorArray* descriptors = instance_descriptors();
-  for (int i = 0; i < descriptors->number_of_descriptors(); i++) {
-    switch (descriptors->GetType(i)) {
-      case MAP_TRANSITION:
-      case CONSTANT_TRANSITION:
-        CreateOneBackPointer(descriptors->GetValue(i));
-        break;
-      case ELEMENTS_TRANSITION: {
-        Object* object = descriptors->GetValue(i);
-        if (object->IsMap()) {
-          CreateOneBackPointer(object);
-        } else {
-          FixedArray* array = FixedArray::cast(object);
-          for (int i = 0; i < array->length(); ++i) {
-            CreateOneBackPointer(array->get(i));
-          }
-        }
-        break;
-      }
-      case CALLBACKS: {
-        Object* object = descriptors->GetValue(i);
-        if (object->IsAccessorPair()) {
-          AccessorPair* accessors = AccessorPair::cast(object);
-          CreateOneBackPointer(accessors->getter());
-          CreateOneBackPointer(accessors->setter());
-        }
-        break;
-      }
-      case NORMAL:
-      case FIELD:
-      case CONSTANT_FUNCTION:
-      case HANDLER:
-      case INTERCEPTOR:
-      case NULL_DESCRIPTOR:
-        break;
-    }
-  }
-}
-
-
-bool Map::RestoreOneBackPointer(Object* object,
-                                Object* real_prototype,
-                                bool* keep_entry) {
-  if (!object->IsMap()) return false;
-  Map* map = Map::cast(object);
+// Clear a possible back pointer in case the transition leads to a dead map.
+// Return true in case a back pointer has been cleared and false otherwise.
+// Set *keep_entry to true when a live map transition has been found.
+static bool ClearBackPointer(Heap* heap, Object* target, bool* keep_entry) {
+  if (!target->IsMap()) return false;
+  Map* map = Map::cast(target);
   if (Marking::MarkBitFrom(map).Get()) {
     *keep_entry = true;
     return false;
+  } else {
+    map->SetBackPointer(heap->undefined_value(), SKIP_WRITE_BARRIER);
+    return true;
   }
-  ASSERT(map->prototype() == this || map->prototype() == real_prototype);
-  // Getter prototype() is read-only, set_prototype() has side effects.
-  *RawField(map, Map::kPrototypeOffset) = real_prototype;
-  return true;
 }
 
 
-void Map::ClearNonLiveTransitions(Heap* heap, Object* real_prototype) {
+void Map::ClearNonLiveTransitions(Heap* heap) {
   DescriptorArray* d = DescriptorArray::cast(
       *RawField(this, Map::kInstanceDescriptorsOrBitField3Offset));
   if (d->IsEmpty()) return;
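
The new ClearBackPointer helper inverts the old restore-the-prototype scheme: it checks the mark bit of each transition target and, when the target is dead, clears the target's back pointer so the dying map can never be re-entered through a back-pointer walk. A compact sketch of that decision, with a hypothetical mark bit and back-pointer field:

    #include <cstdio>

    // Hypothetical minimal map: a mark bit plus a back pointer.
    struct Map {
      bool marked;        // set by the marker when the map is live
      Map* back_pointer;  // parent map in the transition tree
    };

    // Returns true when a back pointer was cleared (dead target) and
    // records in *keep_entry whether a live transition was seen.
    bool ClearBackPointer(Map* target, bool* keep_entry) {
      if (target == nullptr) return false;  // not a map transition
      if (target->marked) {
        *keep_entry = true;                 // live: keep the descriptor entry
        return false;
      }
      target->back_pointer = nullptr;       // dead: sever the back link
      return true;
    }

    int main() {
      Map dead = {false, nullptr}, live = {true, nullptr};
      bool keep = false;
      std::printf("cleared dead target: %d\n", ClearBackPointer(&dead, &keep));
      std::printf("cleared live target: %d, keep=%d\n",
                  ClearBackPointer(&live, &keep), keep);
      return 0;
    }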
@@ -7349,24 +7205,22 @@ void Map::ClearNonLiveTransitions(Heap* heap, Object* real_prototype) {
     // If the pair (value, details) is a map transition, check if the target is
     // live. If not, null the descriptor. Also drop the back pointer for that
     // map transition, so that this map is not reached again by following a back
-    // pointer from a non-live object.
+    // pointer from that non-live map.
     bool keep_entry = false;
     PropertyDetails details(Smi::cast(contents->get(i + 1)));
     switch (details.type()) {
       case MAP_TRANSITION:
       case CONSTANT_TRANSITION:
-        RestoreOneBackPointer(contents->get(i), real_prototype, &keep_entry);
+        ClearBackPointer(heap, contents->get(i), &keep_entry);
         break;
       case ELEMENTS_TRANSITION: {
         Object* object = contents->get(i);
         if (object->IsMap()) {
-          RestoreOneBackPointer(object, real_prototype, &keep_entry);
+          ClearBackPointer(heap, object, &keep_entry);
         } else {
           FixedArray* array = FixedArray::cast(object);
           for (int j = 0; j < array->length(); ++j) {
-            if (RestoreOneBackPointer(array->get(j),
-                                      real_prototype,
-                                      &keep_entry)) {
+            if (ClearBackPointer(heap, array->get(j), &keep_entry)) {
               array->set_undefined(j);
             }
           }
@@ -7377,14 +7231,10 @@ void Map::ClearNonLiveTransitions(Heap* heap, Object* real_prototype) {
         Object* object = contents->get(i);
         if (object->IsAccessorPair()) {
           AccessorPair* accessors = AccessorPair::cast(object);
-          if (RestoreOneBackPointer(accessors->getter(),
-                                    real_prototype,
-                                    &keep_entry)) {
+          if (ClearBackPointer(heap, accessors->getter(), &keep_entry)) {
             accessors->set_getter(heap->the_hole_value());
           }
-          if (RestoreOneBackPointer(accessors->setter(),
-                                    real_prototype,
-                                    &keep_entry)) {
+          if (ClearBackPointer(heap, accessors->setter(), &keep_entry)) {
             accessors->set_setter(heap->the_hole_value());
           }
         } else {
@@ -7914,8 +7764,8 @@ void SharedFunctionInfo::DetachInitialMap() {
   Map* map = reinterpret_cast<Map*>(initial_map());
 
   // Make the map remember to restore the link if it survives the GC.
-  map->set_bit_field3(
-      map->bit_field3() | (1 << Map::kAttachedToSharedFunctionInfo));
+  map->set_bit_field2(
+      map->bit_field2() | (1 << Map::kAttachedToSharedFunctionInfo));
 
   // Undo state changes made by StartInobjectTracking (except the
   // construction_count). This way if the initial map does not survive the GC
@@ -7935,8 +7785,8 @@ void SharedFunctionInfo::DetachInitialMap() {
 
 // Called from GC, hence reinterpret_cast and unchecked accessors.
 void SharedFunctionInfo::AttachInitialMap(Map* map) {
-  map->set_bit_field3(
-      map->bit_field3() & ~(1 << Map::kAttachedToSharedFunctionInfo));
+  map->set_bit_field2(
+      map->bit_field2() & ~(1 << Map::kAttachedToSharedFunctionInfo));
 
   // Resume inobject slack tracking.
   set_initial_map(map);
@@ -8232,6 +8082,20 @@ void Code::ClearInlineCaches() {
 }
 
 
+void Code::ClearTypeFeedbackCells(Heap* heap) {
+  Object* raw_info = type_feedback_info();
+  if (raw_info->IsTypeFeedbackInfo()) {
+    TypeFeedbackCells* type_feedback_cells =
+        TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
+    for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
+      ASSERT(type_feedback_cells->AstId(i)->IsSmi());
+      JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
+      cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
+    }
+  }
+}
+
+
 #ifdef ENABLE_DISASSEMBLER
 
 void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
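
ClearTypeFeedbackCells above walks a code object's type-feedback cells and stores the uninitialized sentinel back into each one, discarding gathered type information; it complements the CompareIC clearing called out in the ChangeLog. A hypothetical sketch of the reset loop over a plain cell vector:

    #include <cstdio>
    #include <vector>

    // Hypothetical feedback cell: holds a sentinel or recorded feedback.
    struct Cell { const void* value; };

    const void* RawUninitializedSentinel() {
      static const int sentinel = 0;  // unique address stands in for the marker
      return &sentinel;
    }

    void ClearTypeFeedbackCells(std::vector<Cell>& cells) {
      for (size_t i = 0; i < cells.size(); ++i)
        cells[i].value = RawUninitializedSentinel();  // forget all feedback
    }

    int main() {
      std::vector<Cell> cells(4, Cell{nullptr});
      ClearTypeFeedbackCells(cells);
      std::printf("all cells reset: %d\n",
                  cells[0].value == RawUninitializedSentinel());
      return 0;
    }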
@@ -8464,6 +8328,14 @@ void Code::Disassemble(const char* name, FILE* out) {
     if (is_call_stub() || is_keyed_call_stub()) {
       PrintF(out, "argc = %d\n", arguments_count());
     }
+    if (is_compare_ic_stub()) {
+      CompareIC::State state = CompareIC::ComputeState(this);
+      PrintF(out, "compare_state = %s\n", CompareIC::GetStateName(state));
+    }
+    if (is_compare_ic_stub() && major_key() == CodeStub::CompareIC) {
+      Token::Value op = CompareIC::ComputeOperation(this);
+      PrintF(out, "compare_operation = %s\n", Token::Name(op));
+    }
   }
   if ((name != NULL) && (name[0] != '\0')) {
     PrintF(out, "name = %s\n", name);
@@ -8569,8 +8441,10 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
   ElementsKind to_kind = (elements_kind == FAST_SMI_ONLY_ELEMENTS)
       ? FAST_SMI_ONLY_ELEMENTS
       : FAST_ELEMENTS;
-  //  int copy_size = Min(old_elements_raw->length(), new_elements->length());
-  accessor->CopyElements(this, new_elements, to_kind);
+  { MaybeObject* maybe_obj =
+        accessor->CopyElements(this, new_elements, to_kind);
+    if (maybe_obj->IsFailure()) return maybe_obj;
+  }
   if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
     set_map_and_elements(new_map, new_elements);
   } else {
@@ -8599,7 +8473,7 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
   // We should never end in here with a pixel or external array.
   ASSERT(!HasExternalArrayElements());
 
-  FixedDoubleArray* elems;
+  FixedArrayBase* elems;
   { MaybeObject* maybe_obj =
         heap->AllocateUninitializedFixedDoubleArray(capacity);
     if (!maybe_obj->To(&elems)) return maybe_obj;
@@ -8614,7 +8488,10 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
   FixedArrayBase* old_elements = elements();
   ElementsKind elements_kind = GetElementsKind();
   ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
-  accessor->CopyElements(this, elems, FAST_DOUBLE_ELEMENTS);
+  { MaybeObject* maybe_obj =
+        accessor->CopyElements(this, elems, FAST_DOUBLE_ELEMENTS);
+    if (maybe_obj->IsFailure()) return maybe_obj;
+  }
   if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
     set_map_and_elements(new_map, elems);
   } else {
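
Both hunks above wrap CopyElements in the standard MaybeObject idiom now that it can fail: call, test IsFailure(), and propagate the failure before installing the new backing store. A stripped-down sketch of that idiom with a hypothetical MaybeObject type:

    #include <cstdio>

    // Hypothetical result type, loosely modelled on V8's allocation results.
    struct MaybeObject {
      bool failed;
      int payload;
      bool IsFailure() const { return failed; }
    };

    MaybeObject CopyElements(bool force_failure) {
      if (force_failure) return MaybeObject{true, 0};  // e.g. allocation failed
      return MaybeObject{false, 42};
    }

    MaybeObject GrowBackingStore(bool simulate_oom) {
      MaybeObject maybe_obj = CopyElements(simulate_oom);
      if (maybe_obj.IsFailure()) return maybe_obj;  // propagate, do not commit
      // Only now is it safe to install the new elements.
      return maybe_obj;
    }

    int main() {
      std::printf("ok path failed?  %d\n", GrowBackingStore(false).IsFailure());
      std::printf("oom path failed? %d\n", GrowBackingStore(true).IsFailure());
      return 0;
    }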
@@ -9750,9 +9627,10 @@ MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
   ElementsKind from_kind = map()->elements_kind();
 
   Isolate* isolate = GetIsolate();
-  if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
-      (to_kind == FAST_ELEMENTS ||
-       elements() == isolate->heap()->empty_fixed_array())) {
+  if ((from_kind == FAST_SMI_ONLY_ELEMENTS ||
+      elements() == isolate->heap()->empty_fixed_array()) &&
+      to_kind == FAST_ELEMENTS) {
+    ASSERT(from_kind != FAST_ELEMENTS);
     MaybeObject* maybe_new_map = GetElementsTransitionMap(isolate, to_kind);
     Map* new_map;
     if (!maybe_new_map->To(&new_map)) return maybe_new_map;
@@ -10796,27 +10674,9 @@ class AsciiSymbolKey : public SequentialSymbolKey<char> {
 
   MaybeObject* AsObject() {
     if (hash_field_ == 0) Hash();
-    MaybeObject *result = HEAP->AllocateAsciiSymbol(string_, hash_field_);
-    if (!result->IsFailure() && result->ToObjectUnchecked()->IsSeqString()) {
-        while (true) {
-            Atomic32 my_symbol_id = next_symbol_id;
-            if (my_symbol_id > Smi::kMaxValue)
-                break;
-            if (my_symbol_id == NoBarrier_CompareAndSwap(&next_symbol_id,
-                                                         my_symbol_id,
-                                                         my_symbol_id + 1)) {
-                SeqString::cast(result->ToObjectUnchecked())->
-                    set_symbol_id(my_symbol_id);
-                break;
-            }
-        }
-    }
-    return result;
+    return HEAP->AllocateAsciiSymbol(string_, hash_field_);
   }
-
-  static Atomic32 next_symbol_id;
 };
-Atomic32 AsciiSymbolKey::next_symbol_id = 1;
 
 
 class SubStringAsciiSymbolKey : public HashTableKey {
@@ -11166,7 +11026,6 @@ template class Dictionary<SeededNumberDictionaryShape, uint32_t>;
 
 template class Dictionary<UnseededNumberDictionaryShape, uint32_t>;
 
-#ifndef __INTEL_COMPILER
 template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::
     Allocate(int at_least_space_for);
 
@@ -11259,7 +11118,7 @@ int Dictionary<StringDictionaryShape, String*>::NumberOfEnumElements();
 
 template
 int HashTable<SeededNumberDictionaryShape, uint32_t>::FindEntry(uint32_t);
-#endif
+
 
// Collates undefined and nonexistent elements below limit from position
 // zero of the elements. The object stays in Dictionary mode.
@@ -12949,7 +12808,7 @@ int BreakPointInfo::GetBreakPointCount() {
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 
-MaybeObject* JSDate::GetField(Object* object, Smi* index) {
+Object* JSDate::GetField(Object* object, Smi* index) {
   return JSDate::cast(object)->DoGetField(
       static_cast<FieldIndex>(index->value()));
 }
index 73629b8..4fd29ad 100644
@@ -59,6 +59,7 @@
 //           - JSWeakMap
 //           - JSRegExp
 //           - JSFunction
+//           - JSModule
 //           - GlobalObject
 //             - JSGlobalObject
 //             - JSBuiltinsObject
@@ -306,6 +307,7 @@ const int kVariableSizeSentinel = 0;
   V(JS_DATE_TYPE)                                                              \
   V(JS_OBJECT_TYPE)                                                            \
   V(JS_CONTEXT_EXTENSION_OBJECT_TYPE)                                          \
+  V(JS_MODULE_TYPE)                                                            \
   V(JS_GLOBAL_OBJECT_TYPE)                                                     \
   V(JS_BUILTINS_OBJECT_TYPE)                                                   \
   V(JS_GLOBAL_PROXY_TYPE)                                                      \
@@ -626,6 +628,7 @@ enum InstanceType {
   JS_DATE_TYPE,
   JS_OBJECT_TYPE,
   JS_CONTEXT_EXTENSION_OBJECT_TYPE,
+  JS_MODULE_TYPE,
   JS_GLOBAL_OBJECT_TYPE,
   JS_BUILTINS_OBJECT_TYPE,
   JS_GLOBAL_PROXY_TYPE,
@@ -677,6 +680,7 @@ const int kExternalArrayTypeCount =
 
 STATIC_CHECK(JS_OBJECT_TYPE == Internals::kJSObjectType);
 STATIC_CHECK(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
+STATIC_CHECK(ODDBALL_TYPE == Internals::kOddballType);
 STATIC_CHECK(FOREIGN_TYPE == Internals::kForeignType);
 
 
@@ -803,6 +807,7 @@ class MaybeObject BASE_EMBEDDED {
   V(JSReceiver)                                \
   V(JSObject)                                  \
   V(JSContextExtensionObject)                  \
+  V(JSModule)                                  \
   V(Map)                                       \
   V(DescriptorArray)                           \
   V(DeoptimizationInputData)                   \
@@ -812,6 +817,7 @@ class MaybeObject BASE_EMBEDDED {
   V(FixedDoubleArray)                          \
   V(Context)                                   \
   V(GlobalContext)                             \
+  V(ModuleContext)                             \
   V(ScopeInfo)                                 \
   V(JSFunction)                                \
   V(Code)                                      \
@@ -1393,14 +1399,12 @@ class JSReceiver: public HeapObject {
                                     Handle<String> key,
                                     Handle<Object> value,
                                     PropertyAttributes attributes,
-                                    StrictModeFlag strict_mode,
-                                    bool skip_fallback_interceptor = false);
+                                    StrictModeFlag strict_mode);
   // Can cause GC.
   MUST_USE_RESULT MaybeObject* SetProperty(String* key,
                                            Object* value,
                                            PropertyAttributes attributes,
-                                           StrictModeFlag strict_mode,
-                                           bool skip_fallback_interceptor = false);
+                                           StrictModeFlag strict_mode);
   MUST_USE_RESULT MaybeObject* SetProperty(LookupResult* result,
                                            String* key,
                                            Object* value,
@@ -1453,12 +1457,8 @@ class JSReceiver: public HeapObject {
 
   // Lookup a property.  If found, the result is valid and has
   // detailed information.
-  void LocalLookup(String* name,
-                   LookupResult* result,
-                   bool skip_fallback_interceptor = false);
-  void Lookup(String* name,
-              LookupResult* result,
-              bool skip_fallback_interceptor = false);
+  void LocalLookup(String* name, LookupResult* result);
+  void Lookup(String* name, LookupResult* result);
 
  protected:
   Smi* GenerateIdentityHash();
@@ -1857,9 +1857,6 @@ class JSObject: public JSReceiver {
   inline void SetInternalField(int index, Object* value);
   inline void SetInternalField(int index, Smi* value);
 
-  inline void SetExternalResourceObject(Object *);
-  inline Object *GetExternalResourceObject();
-
   // The following lookup functions skip interceptors.
   void LocalLookupRealNamedProperty(String* name, LookupResult* result);
   void LookupRealNamedProperty(String* name, LookupResult* result);
@@ -2195,6 +2192,7 @@ class JSObject: public JSReceiver {
       Object* getter,
       Object* setter,
       PropertyAttributes attributes);
+  MUST_USE_RESULT MaybeObject* CreateAccessorPairFor(String* name);
   MUST_USE_RESULT MaybeObject* DefinePropertyAccessor(
       String* name,
       Object* getter,
@@ -2476,7 +2474,7 @@ class DescriptorArray: public FixedArray {
   // Accessors for fetching instance descriptor at descriptor number.
   inline String* GetKey(int descriptor_number);
   inline Object* GetValue(int descriptor_number);
-  inline Smi* GetDetails(int descriptor_number);
+  inline PropertyDetails GetDetails(int descriptor_number);
   inline PropertyType GetType(int descriptor_number);
   inline int GetFieldIndex(int descriptor_number);
   inline JSFunction* GetConstantFunction(int descriptor_number);
@@ -2485,7 +2483,6 @@ class DescriptorArray: public FixedArray {
   inline bool IsProperty(int descriptor_number);
   inline bool IsTransitionOnly(int descriptor_number);
   inline bool IsNullDescriptor(int descriptor_number);
-  inline bool IsDontEnum(int descriptor_number);
 
   class WhitenessWitness {
    public:
@@ -2603,6 +2600,9 @@ class DescriptorArray: public FixedArray {
   // Is the descriptor array sorted and without duplicates?
   bool IsSortedNoDuplicates();
 
+  // Is the descriptor array consistent with the back pointers in targets?
+  bool IsConsistentWithBackPointers(Map* current_map);
+
   // Are two DescriptorArrays equal?
   bool IsEqualTo(DescriptorArray* other);
 #endif
@@ -2639,10 +2639,6 @@ class DescriptorArray: public FixedArray {
     return descriptor_number << 1;
   }
 
-  bool is_null_descriptor(int descriptor_number) {
-    return PropertyDetails(GetDetails(descriptor_number)).type() ==
-        NULL_DESCRIPTOR;
-  }
   // Swap operation on FixedArray without using write barriers.
   static inline void NoIncrementalWriteBarrierSwap(
       FixedArray* array, int first, int second);
@@ -3348,9 +3344,6 @@ class ScopeInfo : public FixedArray {
   // Return the language mode of this scope.
   LanguageMode language_mode();
 
-  // Is this scope a qml mode scope?
-  bool IsQmlMode();
-
   // Does this scope make a non-strict eval call?
   bool CallsNonStrictEval() {
     return CallsEval() && (language_mode() == CLASSIC_MODE);
@@ -3373,7 +3366,7 @@ class ScopeInfo : public FixedArray {
   //  3. One context slot for the function name if it is context allocated.
   // Parameters allocated in the context count as context allocated locals. If
   // no contexts are allocated for this scope ContextLength returns 0.
-  int ContextLength(bool qml_function = false);
+  int ContextLength();
 
   // Is this scope the scope of a named function expression?
   bool HasFunctionName();
@@ -3425,8 +3418,8 @@ class ScopeInfo : public FixedArray {
   // otherwise returns a value < 0. The name must be a symbol (canonicalized).
   int ParameterIndex(String* name);
 
-  // Lookup support for serialized scope info. Returns the
-  // function context slot index if the function name is present (named
+  // Lookup support for serialized scope info. Returns the function context
+  // slot index if the function name is present and context-allocated (named
   // function expressions, only), otherwise returns a value < 0. The name
   // must be a symbol (canonicalized).
   int FunctionContextSlotIndex(String* name, VariableMode* mode);
@@ -3522,9 +3515,8 @@ class ScopeInfo : public FixedArray {
   class TypeField:             public BitField<ScopeType,            0, 3> {};
   class CallsEvalField:        public BitField<bool,                 3, 1> {};
   class LanguageModeField:     public BitField<LanguageMode,         4, 2> {};
-  class QmlModeField:          public BitField<bool,                 6, 1> {};
-  class FunctionVariableField: public BitField<FunctionVariableInfo, 7, 2> {};
-  class FunctionVariableMode:  public BitField<VariableMode,         9, 3> {};
+  class FunctionVariableField: public BitField<FunctionVariableInfo, 6, 2> {};
+  class FunctionVariableMode:  public BitField<VariableMode,         8, 3> {};
 
   // BitFields representing the encoded information for context locals in the
   // ContextLocalInfoEntries part.
@@ -4302,6 +4294,11 @@ class Code: public HeapObject {
   inline byte compare_state();
   inline void set_compare_state(byte value);
 
+  // [compare_operation]: For kind COMPARE_IC tells what compare operation the
+  // stub was generated for.
+  inline byte compare_operation();
+  inline void set_compare_operation(byte value);
+
   // [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
   inline byte to_boolean_state();
   inline void set_to_boolean_state(byte value);
@@ -4437,6 +4434,7 @@ class Code: public HeapObject {
   void CodeVerify();
 #endif
   void ClearInlineCaches();
+  void ClearTypeFeedbackCells(Heap* heap);
 
  // Max loop nesting marker used to postpone OSR. We don't take loop
   // nesting that is deeper than 5 levels into account.
@@ -4485,6 +4483,8 @@ class Code: public HeapObject {
 
   static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
 
+  static const int kCompareOperationOffset = kCompareStateOffset + 1;
+
   static const int kAllowOSRAtLoopNestingLevelOffset = kFullCodeFlags + 1;
   static const int kProfilerTicksOffset = kAllowOSRAtLoopNestingLevelOffset + 1;
 
@@ -4614,11 +4614,11 @@ class Map: public HeapObject {
 
   // Tells whether the instance has a call-as-function handler.
   inline void set_has_instance_call_handler() {
-    set_bit_field3(bit_field3() | (1 << kHasInstanceCallHandler));
+    set_bit_field(bit_field() | (1 << kHasInstanceCallHandler));
   }
 
   inline bool has_instance_call_handler() {
-    return ((1 << kHasInstanceCallHandler) & bit_field3()) != 0;
+    return ((1 << kHasInstanceCallHandler) & bit_field()) != 0;
   }
 
   inline void set_is_extensible(bool value);
@@ -4691,20 +4691,6 @@ class Map: public HeapObject {
   inline void set_is_access_check_needed(bool access_check_needed);
   inline bool is_access_check_needed();
 
-  // Whether the named interceptor is a fallback interceptor or not
-  inline void set_named_interceptor_is_fallback(bool value);
-  inline bool named_interceptor_is_fallback();
-
-  // Tells whether the instance has the space for an external resource
-  // object
-  inline void set_has_external_resource(bool value);
-  inline bool has_external_resource();
-
-  // Tells whether the user object comparison callback should be used for
-  // comparisons involving this object
-  inline void set_use_user_object_comparison(bool value);
-  inline bool use_user_object_comparison();
-  
   // [prototype]: implicit prototype object.
   DECL_ACCESSORS(prototype, Object)
 
@@ -4727,19 +4713,30 @@ class Map: public HeapObject {
   // [stub cache]: contains stubs compiled for this map.
   DECL_ACCESSORS(code_cache, Object)
 
+  // [back pointer]: points back to the parent map from which a transition
+  // leads to this map. The field overlaps with prototype transitions and the
+  // back pointer will be moved into the prototype transitions array if
+  // required.
+  inline Object* GetBackPointer();
+  inline void SetBackPointer(Object* value,
+                             WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
   // [prototype transitions]: cache of prototype transitions.
   // Prototype transition is a transition that happens
   // when we change object's prototype to a new one.
   // Cache format:
   //    0: finger - index of the first free cell in the cache
-  //    1 + 2 * i: prototype
-  //    2 + 2 * i: target map
+  //    1: back pointer that overlaps with prototype transitions field.
+  //    2 + 2 * i: prototype
+  //    3 + 2 * i: target map
   DECL_ACCESSORS(prototype_transitions, FixedArray)
 
-  inline FixedArray* unchecked_prototype_transitions();
+  inline void init_prototype_transitions(Object* undefined);
+  inline HeapObject* unchecked_prototype_transitions();
 
-  static const int kProtoTransitionHeaderSize = 1;
+  static const int kProtoTransitionHeaderSize = 2;
   static const int kProtoTransitionNumberOfEntriesOffset = 0;
+  static const int kProtoTransitionBackPointerOffset = 1;
   static const int kProtoTransitionElementsPerEntry = 2;
   static const int kProtoTransitionPrototypeOffset = 0;
   static const int kProtoTransitionMapOffset = 1;
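
The constants above encode the revised cache layout: a two-slot header (entry count at index 0, the overlapped back pointer at index 1) followed by two-slot entries, so entry i's prototype sits at slot 2 + 2*i and its target map at 3 + 2*i. A tiny sketch of the index arithmetic, mirroring those constants (values copied from the header above):

    #include <cassert>

    const int kProtoTransitionHeaderSize = 2;        // count + back pointer
    const int kProtoTransitionElementsPerEntry = 2;  // prototype, target map
    const int kProtoTransitionPrototypeOffset = 0;
    const int kProtoTransitionMapOffset = 1;

    int IndexFor(int entry) {
      return kProtoTransitionHeaderSize +
             entry * kProtoTransitionElementsPerEntry;
    }

    int main() {
      // Entry 0 starts right after the header; entry 1 two slots later.
      assert(IndexFor(0) + kProtoTransitionPrototypeOffset == 2);
      assert(IndexFor(0) + kProtoTransitionMapOffset == 3);
      assert(IndexFor(1) + kProtoTransitionPrototypeOffset == 4);
      return 0;
    }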
@@ -4811,25 +4808,10 @@ class Map: public HeapObject {
   // Removes a code object from the code cache at the given index.
   void RemoveFromCodeCache(String* name, Code* code, int index);
 
-  // For every transition in this map, makes the transition's
-  // target's prototype pointer point back to this map.
-  // This is undone in MarkCompactCollector::ClearNonLiveTransitions().
-  void CreateBackPointers();
-
-  void CreateOneBackPointer(Object* transition_target);
-
-  // Set all map transitions from this map to dead maps to null.
-  // Also, restore the original prototype on the targets of these
-  // transitions, so that we do not process this map again while
-  // following back pointers.
-  void ClearNonLiveTransitions(Heap* heap, Object* real_prototype);
-
-  // Restore a possible back pointer in the prototype field of object.
-  // Return true in that case and false otherwise. Set *keep_entry to
-  // true when a live map transition has been found.
-  bool RestoreOneBackPointer(Object* object,
-                             Object* real_prototype,
-                             bool* keep_entry);
+  // Set all map transitions from this map to dead maps to null.  Also clear
+  // back pointers in transition targets so that we do not process this map
+  // again while following back pointers.
+  void ClearNonLiveTransitions(Heap* heap);
 
   // Computes a hash value for this map, to be used in HashTables and such.
   int Hash();
@@ -4864,6 +4846,14 @@ class Map: public HeapObject {
   Handle<Map> FindTransitionedMap(MapHandleList* candidates);
   Map* FindTransitionedMap(MapList* candidates);
 
+  // Zaps the contents of backing data structures in debug mode. Note that the
+  // heap verifier (i.e. VerifyMarkingVisitor) relies on zapping of objects
+  // holding weak references when incremental marking is used, because it also
+  // iterates over objects that are otherwise unreachable.
+#ifdef DEBUG
+  void ZapInstanceDescriptors();
+  void ZapPrototypeTransitions();
+#endif
 
   // Dispatched behavior.
 #ifdef OBJECT_PRINT
@@ -4911,16 +4901,17 @@ class Map: public HeapObject {
       kConstructorOffset + kPointerSize;
   static const int kCodeCacheOffset =
       kInstanceDescriptorsOrBitField3Offset + kPointerSize;
-  static const int kPrototypeTransitionsOffset =
+  static const int kPrototypeTransitionsOrBackPointerOffset =
       kCodeCacheOffset + kPointerSize;
-  static const int kPadStart = kPrototypeTransitionsOffset + kPointerSize;
+  static const int kPadStart =
+      kPrototypeTransitionsOrBackPointerOffset + kPointerSize;
   static const int kSize = MAP_POINTER_ALIGN(kPadStart);
 
   // Layout of pointer fields. Heap iteration code relies on them
   // being continuously allocated.
   static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
   static const int kPointerFieldsEndOffset =
-      Map::kPrototypeTransitionsOffset + kPointerSize;
+      kPrototypeTransitionsOrBackPointerOffset + kPointerSize;
 
   // Byte offsets within kInstanceSizesOffset.
   static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
@@ -4948,14 +4939,14 @@ class Map: public HeapObject {
   static const int kHasNamedInterceptor = 3;
   static const int kHasIndexedInterceptor = 4;
   static const int kIsUndetectable = 5;
-  static const int kHasExternalResource = 6;
+  static const int kHasInstanceCallHandler = 6;
   static const int kIsAccessCheckNeeded = 7;
 
   // Bit positions for bit field 2
   static const int kIsExtensible = 0;
   static const int kFunctionWithPrototype = 1;
   static const int kStringWrapperSafeForDefaultValueOf = 2;
-  static const int kUseUserObjectComparison = 3;
+  static const int kAttachedToSharedFunctionInfo = 3;
   // No bits can be used after kElementsKindFirstBit, they are all reserved for
   // storing ElementKind.
   static const int kElementsKindShift = 4;
@@ -4972,9 +4963,6 @@ class Map: public HeapObject {
 
   // Bit positions for bit field 3
   static const int kIsShared = 0;
-  static const int kNamedInterceptorIsFallback = 1;
-  static const int kHasInstanceCallHandler = 2;
-  static const int kAttachedToSharedFunctionInfo = 3;
 
   // Layout of the default cache. It holds alternating name and code objects.
   static const int kCodeCacheEntrySize = 2;
@@ -5362,6 +5350,8 @@ class SharedFunctionInfo: public HeapObject {
   inline int deopt_counter();
   inline void set_deopt_counter(int counter);
 
+  inline int profiler_ticks();
+
   // Inline cache age is used to infer whether the function survived a context
   // disposal or not. In the former case we reset the opt_count.
   inline int ic_age();
@@ -5413,9 +5403,6 @@ class SharedFunctionInfo: public HeapObject {
   // Indicates whether the language mode of this function is EXTENDED_MODE.
   inline bool is_extended_mode();
 
-  // Indicates whether the function is a qml mode function.
-  DECL_BOOLEAN_ACCESSORS(qml_mode)
-
   // False if the function definitely does not allocate an arguments object.
   DECL_BOOLEAN_ACCESSORS(uses_arguments)
 
@@ -5658,7 +5645,6 @@ class SharedFunctionInfo: public HeapObject {
     kOptimizationDisabled = kCodeAgeShift + kCodeAgeSize,
     kStrictModeFunction,
     kExtendedModeFunction,
-    kQmlModeFunction,
     kUsesArguments,
     kHasDuplicateParameters,
     kNative,
@@ -5724,6 +5710,35 @@ class SharedFunctionInfo: public HeapObject {
 };
 
 
+// Representation for module instance objects.
+class JSModule: public JSObject {
+ public:
+  // [context]: the context holding the module's locals, or undefined if none.
+  DECL_ACCESSORS(context, Object)
+
+  // Casting.
+  static inline JSModule* cast(Object* obj);
+
+  // Dispatched behavior.
+#ifdef OBJECT_PRINT
+  inline void JSModulePrint() {
+    JSModulePrint(stdout);
+  }
+  void JSModulePrint(FILE* out);
+#endif
+#ifdef DEBUG
+  void JSModuleVerify();
+#endif
+
+  // Layout description.
+  static const int kContextOffset = JSObject::kHeaderSize;
+  static const int kSize = kContextOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSModule);
+};
+
+
 // JSFunction describes JavaScript functions.
 class JSFunction: public JSObject {
  public:
@@ -6123,7 +6138,7 @@ class JSDate: public JSObject {
 
   // Returns the date field with the specified index.
   // See FieldIndex for the list of date fields.
-  static MaybeObject* GetField(Object* date, Smi* index);
+  static Object* GetField(Object* date, Smi* index);
 
   void SetValue(Object* value, bool is_value_nan);
 
@@ -6872,7 +6887,7 @@ class String: public HeapObject {
   inline void Set(int index, uint16_t value);
   // Get individual two byte char in the string.  Repeated calls
   // to this method are not efficient unless the string is flat.
-  inline uint16_t Get(int index);
+  INLINE(uint16_t Get(int index));
 
   // Try to flatten the string.  Checks first inline to see if it is
   // necessary.  Does nothing if the string is not a cons string.
@@ -6922,9 +6937,6 @@ class String: public HeapObject {
   bool IsAsciiEqualTo(Vector<const char> str);
   bool IsTwoByteEqualTo(Vector<const uc16> str);
 
-  bool SlowEqualsExternal(uc16 *string, int length);
-  bool SlowEqualsExternal(char *string, int length);
-
   // Return a UTF8 representation of the string.  The string is null
   // terminated but may optionally contain nulls.  Length is returned
  // in length_output if length_output is not a null pointer.  The string
@@ -7180,13 +7192,8 @@ class SeqString: public String {
   // Casting.
   static inline SeqString* cast(Object* obj);
 
-  // Get and set the symbol id of the string
-  inline int symbol_id();
-  inline void set_symbol_id(int value);
-
   // Layout description.
-  static const int kSymbolIdOffset = String::kSize;
-  static const int kHeaderSize = kSymbolIdOffset + kPointerSize;
+  static const int kHeaderSize = String::kSize;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
@@ -7648,6 +7655,10 @@ class Oddball: public HeapObject {
                               kToNumberOffset + kPointerSize,
                               kSize> BodyDescriptor;
 
+  STATIC_CHECK(kKindOffset == Internals::kOddballKindOffset);
+  STATIC_CHECK(kNull == Internals::kNullOddballKind);
+  STATIC_CHECK(kUndefined == Internals::kUndefinedOddballKind);
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Oddball);
 };
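
The added STATIC_CHECKs pin Oddball's layout and kind constants to the copies mirrored into the public Internals helpers, presumably in support of the inlined API functions mentioned in the ChangeLog: if either side drifts, the build fails instead of a fast path silently reading a stale offset. Standard C++11 expresses the same guard with static_assert; a minimal sketch with hypothetical constants:

    // Internal definition of a field offset...
    const int kKindOffset = 8;

    // ...and the copy mirrored into a public, header-only fast path.
    namespace Internals { const int kOddballKindOffset = 8; }

    // Compile-time guard: the build breaks if the two ever disagree.
    static_assert(kKindOffset == Internals::kOddballKindOffset,
                  "public mirror of the Oddball kind offset is stale");

    int main() { return 0; }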
@@ -8182,7 +8193,6 @@ class InterceptorInfo: public Struct {
   DECL_ACCESSORS(deleter, Object)
   DECL_ACCESSORS(enumerator, Object)
   DECL_ACCESSORS(data, Object)
-  DECL_ACCESSORS(is_fallback, Smi)
 
   static inline InterceptorInfo* cast(Object* obj);
 
@@ -8202,8 +8212,7 @@ class InterceptorInfo: public Struct {
   static const int kDeleterOffset = kQueryOffset + kPointerSize;
   static const int kEnumeratorOffset = kDeleterOffset + kPointerSize;
   static const int kDataOffset = kEnumeratorOffset + kPointerSize;
-  static const int kFallbackOffset = kDataOffset + kPointerSize;
-  static const int kSize = kFallbackOffset + kPointerSize;
+  static const int kSize = kDataOffset + kPointerSize;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(InterceptorInfo);
@@ -8326,8 +8335,6 @@ class ObjectTemplateInfo: public TemplateInfo {
  public:
   DECL_ACCESSORS(constructor, Object)
   DECL_ACCESSORS(internal_field_count, Object)
-  DECL_ACCESSORS(has_external_resource, Object)
-  DECL_ACCESSORS(use_user_object_comparison, Object)
 
   static inline ObjectTemplateInfo* cast(Object* obj);
 
@@ -8344,9 +8351,7 @@ class ObjectTemplateInfo: public TemplateInfo {
   static const int kConstructorOffset = TemplateInfo::kHeaderSize;
   static const int kInternalFieldCountOffset =
       kConstructorOffset + kPointerSize;
-  static const int kHasExternalResourceOffset = kInternalFieldCountOffset + kPointerSize;
-  static const int kUseUserObjectComparisonOffset = kHasExternalResourceOffset + kPointerSize;
-  static const int kSize = kUseUserObjectComparisonOffset + kPointerSize;
+  static const int kSize = kInternalFieldCountOffset + kPointerSize;
 };
 
 
index fba6b48..8620519 100644
@@ -617,9 +617,6 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
     scope->set_end_position(source->length());
     FunctionState function_state(this, scope, isolate());
     top_scope_->SetLanguageMode(info->language_mode());
-    if (info->is_qml_mode()) {
-      scope->EnableQmlModeFlag();
-    }
     ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16);
     bool ok = true;
     int beg_loc = scanner().location().beg_pos;
@@ -718,9 +715,6 @@ FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
            info->is_extended_mode());
     ASSERT(info->language_mode() == shared_info->language_mode());
     scope->SetLanguageMode(shared_info->language_mode());
-    if (shared_info->qml_mode()) {
-      top_scope_->EnableQmlModeFlag();
-    }
     FunctionLiteral::Type type = shared_info->is_expression()
         ? (shared_info->is_anonymous()
               ? FunctionLiteral::ANONYMOUS_EXPRESSION
@@ -1339,11 +1333,19 @@ Module* Parser::ParseModuleLiteral(bool* ok) {
 
   Expect(Token::RBRACE, CHECK_OK);
   scope->set_end_position(scanner().location().end_pos);
-  body->set_block_scope(scope);
+  body->set_scope(scope);
 
-  scope->interface()->Freeze(ok);
+  // Instance objects have to be created ahead of time (before code generation
+  // links them) because of potentially cyclic references between them.
+  // We create them here, to avoid another pass over the AST.
+  Interface* interface = scope->interface();
+  interface->MakeModule(ok);
+  ASSERT(ok);
+  interface->MakeSingleton(Isolate::Current()->factory()->NewJSModule(), ok);
   ASSERT(ok);
-  return factory()->NewModuleLiteral(body, scope->interface());
+  interface->Freeze(ok);
+  ASSERT(ok);
+  return factory()->NewModuleLiteral(body, interface);
 }
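
As the new comment in this hunk notes, module instance objects are allocated at parse time, before code generation links modules together; creating all instances first is what makes cyclic references between modules representable. A toy sketch of the two-phase create-then-link pattern (types hypothetical):

    #include <cstdio>

    // Hypothetical module instance, created before its body is linked.
    struct Module {
      const char* name;
      Module* import;  // wired up in a later linking pass
    };

    int main() {
      // Phase 1: create both instances up front...
      Module a = {"a", nullptr};
      Module b = {"b", nullptr};
      // Phase 2: ...then link; the cycle is only possible because
      // both objects already exist.
      a.import = &b;
      b.import = &a;
      std::printf("%s imports %s; %s imports %s\n",
                  a.name, a.import->name, b.name, b.import->name);
      return 0;
    }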
 
 
@@ -1409,7 +1411,14 @@ Module* Parser::ParseModuleUrl(bool* ok) {
 #ifdef DEBUG
   if (FLAG_print_interface_details) PrintF("# Url ");
 #endif
-  return factory()->NewModuleUrl(symbol);
+
+  Module* result = factory()->NewModuleUrl(symbol);
+  Interface* interface = result->interface();
+  interface->MakeSingleton(Isolate::Current()->factory()->NewJSModule(), ok);
+  ASSERT(ok);
+  interface->Freeze(ok);
+  ASSERT(ok);
+  return result;
 }
 
 
@@ -2021,7 +2030,7 @@ Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
   Expect(Token::RBRACE, CHECK_OK);
   block_scope->set_end_position(scanner().location().end_pos);
   block_scope = block_scope->FinalizeBlockScope();
-  body->set_block_scope(block_scope);
+  body->set_scope(block_scope);
   return body;
 }
 
@@ -2260,7 +2269,7 @@ Block* Parser::ParseVariableDeclarations(
     // Global variable declarations must be compiled in a specific
     // way. When the script containing the global variable declaration
     // is entered, the global variable must be declared, so that if it
-    // doesn't exist (not even in a prototype of the global object) it
+    // doesn't exist (on the global object itself, see ES5 errata) it
     // gets created with an initial undefined value. This is handled
     // by the declarations part of the function representing the
     // top-level global code; see Runtime::DeclareGlobalVariable. If
@@ -2287,11 +2296,6 @@ Block* Parser::ParseVariableDeclarations(
         arguments->Add(value);
         value = NULL;  // zap the value to avoid the unnecessary assignment
 
-        int qml_mode = 0;
-        if (top_scope_->is_qml_mode() && !Isolate::Current()->global()->HasProperty(*name))
-          qml_mode = 1;
-        arguments->Add(factory()->NewNumberLiteral(qml_mode));
-
         // Construct the call to Runtime_InitializeConstGlobal
         // and add it to the initialization statement block.
         // Note that the function does different things depending on
@@ -2306,11 +2310,6 @@ Block* Parser::ParseVariableDeclarations(
         LanguageMode language_mode = initialization_scope->language_mode();
         arguments->Add(factory()->NewNumberLiteral(language_mode));
 
-        int qml_mode = 0;
-        if (top_scope_->is_qml_mode() && !Isolate::Current()->global()->HasProperty(*name))
-          qml_mode = 1;
-        arguments->Add(factory()->NewNumberLiteral(qml_mode));
-
         // Be careful not to assign a value to the global variable if
         // we're in a with. The initialization value should not
         // necessarily be stored in the global object in that case,
@@ -2933,7 +2932,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
         top_scope_ = saved_scope;
         for_scope->set_end_position(scanner().location().end_pos);
         for_scope = for_scope->FinalizeBlockScope();
-        body_block->set_block_scope(for_scope);
+        body_block->set_scope(for_scope);
         // Parsed for-in loop w/ let declaration.
         return loop;
 
@@ -3013,7 +3012,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
     Block* result = factory()->NewBlock(NULL, 2, false);
     result->AddStatement(init);
     result->AddStatement(loop);
-    result->set_block_scope(for_scope);
+    result->set_scope(for_scope);
     if (loop) loop->Initialize(NULL, cond, next, body);
     return result;
   } else {
@@ -4476,15 +4475,15 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
     Variable* fvar = NULL;
     Token::Value fvar_init_op = Token::INIT_CONST;
     if (type == FunctionLiteral::NAMED_EXPRESSION) {
-      VariableMode fvar_mode;
-      if (is_extended_mode()) {
-        fvar_mode = CONST_HARMONY;
-        fvar_init_op = Token::INIT_CONST_HARMONY;
-      } else {
-        fvar_mode = CONST;
-      }
-      fvar =
-          top_scope_->DeclareFunctionVar(function_name, fvar_mode, factory());
+      if (is_extended_mode()) fvar_init_op = Token::INIT_CONST_HARMONY;
+      VariableMode fvar_mode = is_extended_mode() ? CONST_HARMONY : CONST;
+      fvar = new(zone()) Variable(top_scope_,
+         function_name, fvar_mode, true /* is valid LHS */,
+         Variable::NORMAL, kCreatedInitialized);
+      VariableProxy* proxy = factory()->NewVariableProxy(fvar);
+      VariableDeclaration* fvar_declaration =
+          factory()->NewVariableDeclaration(proxy, fvar_mode, top_scope_);
+      top_scope_->DeclareFunctionVar(fvar_declaration);
     }
 
     // Determine whether the function will be lazily compiled.
index 8b1e381..089ea38 100644 (file)
@@ -620,11 +620,8 @@ class SamplerThread : public Thread {
       : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
         interval_(interval) {}
 
-  static void SetUp() {
-    if (!mutex_) {
-      mutex_ = OS::CreateMutex();
-    }
-  }
+  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
+  static void TearDown() { delete mutex_; }
 
   static void AddActiveSampler(Sampler* sampler) {
     ScopedLock lock(mutex_);
@@ -749,6 +746,12 @@ void OS::SetUp() {
 }
 
 
+void OS::TearDown() {
+  SamplerThread::TearDown();
+  delete limit_mutex;
+}
+
+
 Sampler::Sampler(Isolate* isolate, int interval)
     : isolate_(isolate),
       interval_(interval),
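
From here on, each platform file receives the same pair of changes: the process-wide sampler mutex is still created lazily in SetUp(), but it now has a matching TearDown(), and a new OS::TearDown() releases both it and limit_mutex so neither is leaked at VM shutdown. A minimal sketch of the lifecycle, using v8's own Mutex and OS::CreateMutex() abstractions (the class name here is hypothetical):

    class SamplerThreadLike {
     public:
      // Lazily create the process-wide mutex; repeated calls are harmless.
      static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
      // Called once from OS::TearDown(); deleting NULL is a no-op.
      static void TearDown() { delete mutex_; mutex_ = NULL; }
     private:
      static Mutex* mutex_;
    };

    Mutex* SamplerThreadLike::mutex_ = NULL;
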
index 6b1c987..511759c 100644 (file)
@@ -554,6 +554,7 @@ class FreeBSDMutex : public Mutex {
     ASSERT(result == 0);
     result = pthread_mutex_init(&mutex_, &attrs);
     ASSERT(result == 0);
+    USE(result);
   }
 
   virtual ~FreeBSDMutex() { pthread_mutex_destroy(&mutex_); }
@@ -716,11 +717,8 @@ class SignalSender : public Thread {
       : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
         interval_(interval) {}
 
-  static void SetUp() {
-    if (!mutex_) {
-      mutex_ = OS::CreateMutex();
-    }
-  }
+  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
+  static void TearDown() { delete mutex_; }
 
   static void AddActiveSampler(Sampler* sampler) {
     ScopedLock lock(mutex_);
@@ -864,6 +862,12 @@ void OS::SetUp() {
 }
 
 
+void OS::TearDown() {
+  SignalSender::TearDown();
+  delete limit_mutex;
+}
+
+
 Sampler::Sampler(Isolate* isolate, int interval)
     : isolate_(isolate),
       interval_(interval),
index 9bea32d..f6db423 100644 (file)
@@ -964,6 +964,25 @@ typedef struct ucontext {
   __sigset_t uc_sigmask;
 } ucontext_t;
 
+#elif !defined(__GLIBC__) && defined(__i386__)
+// x86 version for Android.
+struct sigcontext {
+  uint32_t gregs[19];
+  void* fpregs;
+  uint32_t oldmask;
+  uint32_t cr2;
+};
+
+typedef uint32_t __sigset_t;
+typedef struct sigcontext mcontext_t;
+typedef struct ucontext {
+  uint32_t uc_flags;
+  struct ucontext* uc_link;
+  stack_t uc_stack;
+  mcontext_t uc_mcontext;
+  __sigset_t uc_sigmask;
+} ucontext_t;
+enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
 #endif
 
 
@@ -1055,11 +1074,8 @@ class SignalSender : public Thread {
         vm_tgid_(getpid()),
         interval_(interval) {}
 
-  static void SetUp() {
-    if (!mutex_) {
-      mutex_ = OS::CreateMutex();
-    }
-  }
+  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
+  static void TearDown() { delete mutex_; }
 
   static void InstallSignalHandler() {
     struct sigaction sa;
@@ -1238,6 +1254,12 @@ void OS::SetUp() {
 }
 
 
+void OS::TearDown() {
+  SignalSender::TearDown();
+  delete limit_mutex;
+}
+
+
 Sampler::Sampler(Isolate* isolate, int interval)
     : isolate_(isolate),
       interval_(interval),
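
The hand-rolled sigcontext/ucontext definitions above exist so the SIGPROF profiler handler can read the interrupted thread's registers on Android/x86, where Bionic's headers omit them. A sketch of the extraction step, assuming the gregs layout and REG_* enum declared in the hunk (Address is v8's byte-pointer typedef):

    static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
      if (signal != SIGPROF) return;
      ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
      mcontext_t& mcontext = ucontext->uc_mcontext;
      Address pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
      Address sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
      Address fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
      // ... record pc/sp/fp into the TickSample as on the other platforms ...
    }
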
index afcd80a..a937ed3 100644 (file)
@@ -743,11 +743,8 @@ class SamplerThread : public Thread {
       : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
         interval_(interval) {}
 
-  static void SetUp() {
-    if (!mutex_) {
-      mutex_ = OS::CreateMutex();
-    }
-  }
+  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
+  static void TearDown() { delete mutex_; }
 
   static void AddActiveSampler(Sampler* sampler) {
     ScopedLock lock(mutex_);
@@ -881,6 +878,12 @@ void OS::SetUp() {
 }
 
 
+void OS::TearDown() {
+  SamplerThread::TearDown();
+  delete limit_mutex;
+}
+
+
 Sampler::Sampler(Isolate* isolate, int interval)
     : isolate_(isolate),
       interval_(interval),
index 42799db..679ef8e 100644 (file)
@@ -91,6 +91,11 @@ void OS::PostSetUp() {
 }
 
 
+void OS::TearDown() {
+  UNIMPLEMENTED();
+}
+
+
 // Returns the accumulated user time for thread.
 int OS::GetUserTime(uint32_t* secs,  uint32_t* usecs) {
   UNIMPLEMENTED();
index 2b2d530..ba33a84 100644 (file)
@@ -793,11 +793,8 @@ class SignalSender : public Thread {
         vm_tgid_(getpid()),
         interval_(interval) {}
 
-  static void SetUp() {
-    if (!mutex_) {
-      mutex_ = OS::CreateMutex();
-    }
-  }
+  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
+  static void TearDown() { delete mutex_; }
 
   static void InstallSignalHandler() {
     struct sigaction sa;
@@ -948,6 +945,12 @@ void OS::SetUp() {
 }
 
 
+void OS::TearDown() {
+  SignalSender::TearDown();
+  delete limit_mutex;
+}
+
+
 Sampler::Sampler(Isolate* isolate, int interval)
     : isolate_(isolate),
       interval_(interval),
index 6631659..d942d78 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -421,7 +421,11 @@ Socket* POSIXSocket::Accept() const {
     return NULL;
   }
 
-  int socket = accept(socket_, NULL, NULL);
+  int socket;
+  do {
+    socket = accept(socket_, NULL, NULL);
+  } while (socket == -1 && errno == EINTR);
+
   if (socket == -1) {
     return NULL;
   } else {
@@ -448,7 +452,9 @@ bool POSIXSocket::Connect(const char* host, const char* port) {
   }
 
   // Connect.
-  status = connect(socket_, result->ai_addr, result->ai_addrlen);
+  do {
+    status = connect(socket_, result->ai_addr, result->ai_addrlen);
+  } while (status == -1 && errno == EINTR);
   freeaddrinfo(result);
   return status == 0;
 }
@@ -467,14 +473,29 @@ bool POSIXSocket::Shutdown() {
 
 
 int POSIXSocket::Send(const char* data, int len) const {
-  int status = send(socket_, data, len, 0);
-  return status;
+  if (len <= 0) return 0;
+  int written = 0;
+  while (written < len) {
+    int status = send(socket_, data + written, len - written, 0);
+    if (status == 0) {
+      break;
+    } else if (status > 0) {
+      written += status;
+    } else if (errno != EINTR) {
+      return 0;
+    }
+  }
+  return written;
 }
 
 
 int POSIXSocket::Receive(char* data, int len) const {
-  int status = recv(socket_, data, len, 0);
-  return status;
+  if (len <= 0) return 0;
+  int status;
+  do {
+    status = recv(socket_, data, len, 0);
+  } while (status == -1 && errno == EINTR);
+  return (status < 0) ? 0 : status;
 }
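
The socket hunks above all apply the same two rules: restart any call that failed with EINTR (a signal arrived before data moved), and in Send() keep looping because send() may accept fewer bytes than requested. Restated as a standalone sketch of the contract the patched methods now implement (the free function is hypothetical, not part of the patch):

    #include <cerrno>
    #include <sys/socket.h>

    // Write all |len| bytes, restarting interrupted calls and resuming after
    // short writes; return bytes written, or 0 on a hard error.
    static int SendAll(int fd, const char* data, int len) {
      if (len <= 0) return 0;
      int written = 0;
      while (written < len) {
        int status = send(fd, data + written, len - written, 0);
        if (status == 0) break;             // peer closed the connection
        if (status > 0) written += status;  // short write: advance and retry
        else if (errno != EINTR) return 0;  // real error: give up
      }
      return written;
    }
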
 
 
diff --git a/src/3rdparty/v8/src/platform-qnx.cc b/src/3rdparty/v8/src/platform-qnx.cc
deleted file mode 100644 (file)
index 53e6cf5..0000000
+++ /dev/null
@@ -1,1078 +0,0 @@
-// Copyright 2012 Research in Motion. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Platform specific code for QNX goes here. For the POSIX compatible parts
-// the implementation is in platform-posix.cc.
-
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/types.h>
-#include <stdlib.h>
-#include <ucontext.h>
-#include <backtrace.h>
-
-// QNX requires memory pages to be marked as
-// executable. Otherwise, OS raises an exception when executing code
-// in that page.
-#include <sys/types.h>  // mmap & munmap
-#include <sys/mman.h>   // mmap & munmap
-#include <sys/stat.h>   // open
-#include <fcntl.h>      // open
-#include <unistd.h>     // sysconf
-#include <strings.h>    // index
-#include <errno.h>
-#include <stdarg.h>
-#include <sys/procfs.h>
-#include <sys/syspage.h>
-
-#undef MAP_TYPE
-
-#include "v8.h"
-
-#include "platform.h"
-#include "platform-posix.h"
-#include "v8threads.h"
-#include "vm-state-inl.h"
-
-
-namespace v8 {
-namespace internal {
-
-// 0 is never a valid thread id on QNX since tids and pids share a
-// name space and pid 0 is reserved (see man 2 kill).
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-double ceiling(double x) {
-  return ceil(x);
-}
-
-
-static Mutex* limit_mutex = NULL;
-
-
-void OS::SetUp() {
-  // Seed the random number generator. We preserve microsecond resolution.
-  uint64_t seed = Ticks() ^ (getpid() << 16);
-  srandom(static_cast<unsigned int>(seed));
-  limit_mutex = CreateMutex();
-
-#ifdef __arm__
-  // When running on ARM hardware check that the EABI used by V8 and
-  // by the C code is the same.
-  bool hard_float = OS::ArmUsingHardFloat();
-  if (hard_float) {
-#if !USE_EABI_HARDFLOAT
-    PrintF("ERROR: Binary compiled with -mfloat-abi=hard but without "
-           "-DUSE_EABI_HARDFLOAT\n");
-    exit(1);
-#endif
-  } else {
-#if USE_EABI_HARDFLOAT
-    PrintF("ERROR: Binary not compiled with -mfloat-abi=hard but with "
-           "-DUSE_EABI_HARDFLOAT\n");
-    exit(1);
-#endif
-  }
-#endif
-}
-
-
-void OS::PostSetUp() {
-  POSIXPostSetUp();
-}
-
-
-uint64_t OS::CpuFeaturesImpliedByPlatform() {
-  return 0;  // QNX runs on anything.
-}
-
-
-#ifdef __arm__
-static bool CPUInfoContainsString(const char * search_string) {
-  const char* file_name = "/proc/cpuinfo";
-  // This is written as a straight-shot, one-pass parser
-  // rather than using STL string and ifstream because,
-  // on QNX, it's reading from a (non-mmap-able)
-  // character special device.
-  FILE* f = NULL;
-  const char* what = search_string;
-
-  if (NULL == (f = fopen(file_name, "r")))
-    return false;
-
-  int k;
-  while (EOF != (k = fgetc(f))) {
-    if (k == *what) {
-      ++what;
-      while ((*what != '\0') && (*what == fgetc(f))) {
-        ++what;
-      }
-      if (*what == '\0') {
-        fclose(f);
-        return true;
-      } else {
-        what = search_string;
-      }
-    }
-  }
-  fclose(f);
-
-  // Did not find string in the proc file.
-  return false;
-}
-
-
-bool OS::ArmCpuHasFeature(CpuFeature feature) {
-  switch (feature) {
-    case VFP3:
-      // All shipping devices currently support this and QNX has no easy way to
-      // determine this at runtime.
-      return true;
-    case ARMv7:
-      return (SYSPAGE_ENTRY(cpuinfo)->flags & ARM_CPU_FLAG_V7) != 0;
-    default:
-      UNREACHABLE();
-  }
-
-  return false;
-}
-
-
-// Simple helper function to detect whether the C code is compiled with
-// option -mfloat-abi=hard. The register d0 is loaded with 1.0 and the register
-// pair r0, r1 is loaded with 0.0. If -mfloat-abi=hard is passed to GCC then
-// calling this will return 1.0 and otherwise 0.0.
-static void ArmUsingHardFloatHelper() {
-  asm("mov r0, #0");
-#if defined(__VFP_FP__) && !defined(__SOFTFP__)
-  // Load 0x3ff00000 into r1 using instructions available in both ARM
-  // and Thumb mode.
-  asm("mov r1, #3");
-  asm("mov r2, #255");
-  asm("lsl r1, r1, #8");
-  asm("orr r1, r1, r2");
-  asm("lsl r1, r1, #20");
-  // For vmov d0, r0, r1 use ARM mode.
-#ifdef __thumb__
-  asm volatile(
-    "@   Enter ARM Mode  \n\t"
-    "    adr r3, 1f      \n\t"
-    "    bx  r3          \n\t"
-    "    .ALIGN 4        \n\t"
-    "    .ARM            \n"
-    "1:  vmov d0, r0, r1 \n\t"
-    "@   Enter THUMB Mode\n\t"
-    "    adr r3, 2f+1    \n\t"
-    "    bx  r3          \n\t"
-    "    .THUMB          \n"
-    "2:                  \n\t");
-#else
-  asm("vmov d0, r0, r1");
-#endif  // __thumb__
-#endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
-  asm("mov r1, #0");
-}
-
-
-bool OS::ArmUsingHardFloat() {
-  // Cast helper function from returning void to returning double.
-  typedef double (*F)();
-  F f = FUNCTION_CAST<F>(FUNCTION_ADDR(ArmUsingHardFloatHelper));
-  return f() == 1.0;
-}
-#endif  // def __arm__
-
-
-int OS::ActivationFrameAlignment() {
-#ifdef V8_TARGET_ARCH_ARM
-  // On EABI ARM targets this is required for fp correctness in the
-  // runtime system.
-  return 8;
-#endif
-  // With gcc 4.4 the tree vectorization optimizer can generate code
-  // that requires 16 byte alignment such as movdqa on x86.
-  return 16;
-}
-
-
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-#if defined(V8_TARGET_ARCH_ARM) && defined(__arm__)
-  // Only use on ARM hardware.
-  MemoryBarrier();
-#else
-  __asm__ __volatile__("" : : : "memory");
-  // An x86 store acts as a release barrier.
-#endif
-  *ptr = value;
-}
-
-
-const char* OS::LocalTimezone(double time) {
-  if (isnan(time)) return "";
-  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
-  struct tm* t = localtime(&tv);
-  if (NULL == t) return "";
-  return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset() {
-  time_t tv = time(NULL);
-  struct tm* t = localtime(&tv);
-  // tm_gmtoff includes any daylight savings offset, so subtract it.
-  return static_cast<double>(t->tm_gmtoff * msPerSecond -
-                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-// We keep the lowest and highest addresses mapped as a quick way of
-// determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
-// 'allocated' space are actually allocated to our heap.  The range is
-// [lowest, highest), inclusive on the low end and exclusive on the high end.
-static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
-static void* highest_ever_allocated = reinterpret_cast<void*>(0);
-
-
-static void UpdateAllocatedSpaceLimits(void* address, int size) {
-  ASSERT(limit_mutex != NULL);
-  ScopedLock lock(limit_mutex);
-
-  lowest_ever_allocated = Min(lowest_ever_allocated, address);
-  highest_ever_allocated =
-      Max(highest_ever_allocated,
-          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
-}
-
-
-bool OS::IsOutsideAllocatedSpace(void* address) {
-  return address < lowest_ever_allocated || address >= highest_ever_allocated;
-}
-
-
-size_t OS::AllocateAlignment() {
-  return sysconf(_SC_PAGESIZE);
-}
-
-
-void* OS::Allocate(const size_t requested,
-                   size_t* allocated,
-                   bool is_executable) {
-  const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* addr = GetRandomMmapAddr();
-  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-  if (mbase == MAP_FAILED) {
-    LOG(i::Isolate::Current(),
-        StringEvent("OS::Allocate", "mmap failed"));
-    return NULL;
-  }
-  *allocated = msize;
-  UpdateAllocatedSpaceLimits(mbase, msize);
-  return mbase;
-}
-
-
-void OS::Free(void* address, const size_t size) {
-  // TODO(1240712): munmap has a return value which is ignored here.
-  int result = munmap(address, size);
-  USE(result);
-  ASSERT(result == 0);
-}
-
-
-void OS::Sleep(int milliseconds) {
-  unsigned int ms = static_cast<unsigned int>(milliseconds);
-  usleep(1000 * ms);
-}
-
-
-void OS::Abort() {
-  // Redirect to std abort to signal abnormal program termination.
-  abort();
-}
-
-
-void OS::DebugBreak() {
-// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
-//  which is the architecture of generated code).
-#if (defined(__arm__) || defined(__thumb__))
-# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
-  asm("bkpt 0");
-# endif
-#else
-  asm("int $3");
-#endif
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
-  PosixMemoryMappedFile(FILE* file, void* memory, int size)
-    : file_(file), memory_(memory), size_(size) { }
-  virtual ~PosixMemoryMappedFile();
-  virtual void* memory() { return memory_; }
-  virtual int size() { return size_; }
- private:
-  FILE* file_;
-  void* memory_;
-  int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
-  FILE* file = fopen(name, "r+");
-  if (file == NULL) return NULL;
-
-  fseek(file, 0, SEEK_END);
-  int size = ftell(file);
-
-  void* memory =
-      mmap(OS::GetRandomMmapAddr(),
-           size,
-           PROT_READ | PROT_WRITE,
-           MAP_SHARED,
-           fileno(file),
-           0);
-  return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
-    void* initial) {
-  FILE* file = fopen(name, "w+");
-  if (file == NULL) return NULL;
-  int result = fwrite(initial, size, 1, file);
-  if (result < 1) {
-    fclose(file);
-    return NULL;
-  }
-  void* memory =
-      mmap(OS::GetRandomMmapAddr(),
-           size,
-           PROT_READ | PROT_WRITE,
-           MAP_SHARED,
-           fileno(file),
-           0);
-  return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) munmap(memory_, size_);
-  fclose(file_);
-}
-
-
-void OS::LogSharedLibraryAddresses() {
-  procfs_mapinfo *mapinfos = NULL, *mapinfo;
-  int proc_fd, num, i;
-
-  struct {
-    procfs_debuginfo info;
-    char buff[PATH_MAX];
-  } map;
-
-  char buf[PATH_MAX + 1];
-  sprintf(buf, "/proc/%d/as", getpid());
-
-  if ((proc_fd = open(buf, O_RDONLY)) == -1) {
-    close(proc_fd);
-    return;
-  }
-
-  /* Get the number of map entries.  */
-  if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
-    close(proc_fd);
-    return;
-  }
-
-  mapinfos = (procfs_mapinfo *)malloc(num * sizeof(procfs_mapinfo));
-  if (mapinfos == NULL) {
-    close(proc_fd);
-    return;
-  }
-
-  /* Fill the map entries.  */
-  if (devctl(proc_fd, DCMD_PROC_PAGEDATA, mapinfos, num * sizeof(procfs_mapinfo), &num) != EOK) {
-    free(mapinfos);
-    close(proc_fd);
-    return;
-  }
-
-  i::Isolate* isolate = ISOLATE;
-
-  for (i = 0; i < num; i++) {
-    mapinfo = mapinfos + i;
-    if (mapinfo->flags & MAP_ELF) {
-      map.info.vaddr = mapinfo->vaddr;
-      if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK)
-           continue;
-
-         LOG(isolate, SharedLibraryEvent(map.info.path, mapinfo->vaddr, mapinfo->vaddr + mapinfo->size));
-       }
-  }
-  free(mapinfos);
-  close(proc_fd);
-}
-
-
-static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
-
-
-void OS::SignalCodeMovingGC() {
-  // Support for ll_prof.py.
-  //
-  // The QNX profiler built into the kernel logs all mmap's with
-  // PROT_EXEC so that analysis tools can properly attribute ticks. We
-  // do a mmap with a name known by ll_prof.py and immediately munmap
-  // it. This injects a GC marker into the stream of events generated
-  // by the kernel and allows us to synchronize V8 code log and the
-  // kernel log.
-  int size = sysconf(_SC_PAGESIZE);
-  FILE* f = fopen(kGCFakeMmap, "w+");
-  void* addr = mmap(OS::GetRandomMmapAddr(),
-                    size,
-                    PROT_READ | PROT_EXEC,
-                    MAP_PRIVATE,
-                    fileno(f),
-                    0);
-  ASSERT(addr != MAP_FAILED);
-  munmap(addr, size);
-  fclose(f);
-}
-
-
-int OS::StackWalk(Vector<OS::StackFrame> frames) {
-  int frames_size = frames.length();
-  bt_addr_t addresses[frames_size];
-  bt_accessor_t acc;
-  bt_memmap_t memmap;
-  bt_init_accessor(&acc, BT_SELF);
-  bt_load_memmap(&acc, &memmap);
-  int frames_count = bt_get_backtrace(&acc, addresses, frames_size);
-  bt_addr_t temp_addr[1];
-  for (int i = 0; i < frames_count; i++) {
-    frames[i].address = reinterpret_cast<void*>(addresses[i]);
-    temp_addr[0] = addresses[i];
-    // Format a text representation of the frame based on the information
-    // available.
-    bt_sprnf_addrs(&memmap, temp_addr, 1, "%a", frames[i].text, kStackWalkMaxTextLen, 0);
-    // Make sure line termination is in place.
-    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
-  }
-  bt_unload_memmap(&memmap);
-  bt_release_accessor(&acc);
-  return 0;
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-VirtualMemory::VirtualMemory(size_t size) {
-  address_ = ReserveRegion(size);
-  size_ = size;
-}
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(OS::GetRandomMmapAddr(),
-                           request_size,
-                           PROT_NONE,
-                           MAP_PRIVATE | MAP_ANONYMOUS,
-                           kMmapFd,
-                           kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  Address base = static_cast<Address>(reservation);
-  Address aligned_base = RoundUp(base, alignment);
-  ASSERT_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  ASSERT_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  ASSERT(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
-  }
-}
-
-
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(OS::GetRandomMmapAddr(),
-                      size,
-                      PROT_NONE,
-                      MAP_PRIVATE | MAP_ANONYMOUS,
-                      kMmapFd,
-                      kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-
-  UpdateAllocatedSpaceLimits(base, size);
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
-              MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return munmap(base, size) == 0;
-}
-
-
-class Thread::PlatformData : public Malloced {
- public:
-  PlatformData() : thread_(kNoThread) {}
-
-  pthread_t thread_;  // Thread handle for pthread.
-};
-
-Thread::Thread(const Options& options)
-    : data_(new PlatformData()),
-      stack_size_(options.stack_size()) {
-  set_name(options.name());
-}
-
-
-Thread::~Thread() {
-  delete data_;
-}
-
-
-static void* ThreadEntry(void* arg) {
-  Thread* thread = reinterpret_cast<Thread*>(arg);
-  // This is also initialized by the first argument to pthread_create() but we
-  // don't know which thread will run first (the original thread or the new
-  // one) so we initialize it here too.
-#ifdef PR_SET_NAME
-  prctl(PR_SET_NAME,
-        reinterpret_cast<unsigned long>(thread->name()),  // NOLINT
-        0, 0, 0);
-#endif
-  thread->data()->thread_ = pthread_self();
-  ASSERT(thread->data()->thread_ != kNoThread);
-  thread->Run();
-  return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
-  strncpy(name_, name, sizeof(name_));
-  name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
-  pthread_attr_t* attr_ptr = NULL;
-  pthread_attr_t attr;
-  if (stack_size_ > 0) {
-    pthread_attr_init(&attr);
-    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
-    attr_ptr = &attr;
-  }
-  int result = pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
-  CHECK_EQ(0, result);
-  ASSERT(data_->thread_ != kNoThread);
-}
-
-
-void Thread::Join() {
-  pthread_join(data_->thread_, NULL);
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
-  pthread_key_t key;
-  int result = pthread_key_create(&key, NULL);
-  USE(result);
-  ASSERT(result == 0);
-  return static_cast<LocalStorageKey>(key);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  int result = pthread_key_delete(pthread_key);
-  USE(result);
-  ASSERT(result == 0);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
-  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-  pthread_setspecific(pthread_key, value);
-}
-
-
-void Thread::YieldCPU() {
-  sched_yield();
-}
-
-
-class QNXMutex : public Mutex {
- public:
-  QNXMutex() {
-    pthread_mutexattr_t attrs;
-    int result = pthread_mutexattr_init(&attrs);
-    ASSERT(result == 0);
-    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
-    ASSERT(result == 0);
-    result = pthread_mutex_init(&mutex_, &attrs);
-    ASSERT(result == 0);
-    USE(result);
-  }
-
-  virtual ~QNXMutex() { pthread_mutex_destroy(&mutex_); }
-
-  virtual int Lock() {
-    int result = pthread_mutex_lock(&mutex_);
-    return result;
-  }
-
-  virtual int Unlock() {
-    int result = pthread_mutex_unlock(&mutex_);
-    return result;
-  }
-
-  virtual bool TryLock() {
-    int result = pthread_mutex_trylock(&mutex_);
-    // Return false if the lock is busy and locking failed.
-    if (result == EBUSY) {
-      return false;
-    }
-    ASSERT(result == 0);  // Verify no other errors.
-    return true;
-  }
-
- private:
-  pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
-  return new QNXMutex();
-}
-
-
-class QNXSemaphore : public Semaphore {
- public:
-  explicit QNXSemaphore(int count) {  sem_init(&sem_, 0, count); }
-  virtual ~QNXSemaphore() { sem_destroy(&sem_); }
-
-  virtual void Wait();
-  virtual bool Wait(int timeout);
-  virtual void Signal() { sem_post(&sem_); }
- private:
-  sem_t sem_;
-};
-
-
-void QNXSemaphore::Wait() {
-  while (true) {
-    int result = sem_wait(&sem_);
-    if (result == 0) return;  // Successfully got semaphore.
-    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
-  }
-}
-
-
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do {                            \
-    (ts)->tv_sec = (tv)->tv_sec;                                    \
-    (ts)->tv_nsec = (tv)->tv_usec * 1000;                           \
-} while (false)
-#endif
-
-
-bool QNXSemaphore::Wait(int timeout) {
-  const long kOneSecondMicros = 1000000;  // NOLINT
-
-  // Split timeout into second and nanosecond parts.
-  struct timeval delta;
-  delta.tv_usec = timeout % kOneSecondMicros;
-  delta.tv_sec = timeout / kOneSecondMicros;
-
-  struct timeval current_time;
-  // Get the current time.
-  if (gettimeofday(&current_time, NULL) == -1) {
-    return false;
-  }
-
-  // Calculate time for end of timeout.
-  struct timeval end_time;
-  timeradd(&current_time, &delta, &end_time);
-
-  struct timespec ts;
-  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
-  // Wait for semaphore signalled or timeout.
-  while (true) {
-    int result = sem_timedwait(&sem_, &ts);
-    if (result == 0) return true;  // Successfully got semaphore.
-    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
-    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
-  }
-}
-
-
-Semaphore* OS::CreateSemaphore(int count) {
-  return new QNXSemaphore(count);
-}
-
-
-static int GetThreadID() {
-  pthread_t thread_id = pthread_self();
-  return thread_id;
-}
-
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
-  USE(info);
-  if (signal != SIGPROF) return;
-  Isolate* isolate = Isolate::UncheckedCurrent();
-  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
-    // We require a fully initialized and entered isolate.
-    return;
-  }
-  if (v8::Locker::IsActive() &&
-      !isolate->thread_manager()->IsLockedByCurrentThread()) {
-    return;
-  }
-
-  Sampler* sampler = isolate->logger()->sampler();
-  if (sampler == NULL || !sampler->IsActive()) return;
-
-  TickSample sample_obj;
-  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
-  if (sample == NULL) sample = &sample_obj;
-
-  // Extracting the sample from the context is extremely machine dependent.
-  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-  mcontext_t& mcontext = ucontext->uc_mcontext;
-  sample->state = isolate->current_vm_state();
-#if V8_HOST_ARCH_IA32
-  sample->pc = reinterpret_cast<Address>(mcontext.cpu.eip);
-  sample->sp = reinterpret_cast<Address>(mcontext.cpu.esp);
-  sample->fp = reinterpret_cast<Address>(mcontext.cpu.ebp);
-#elif V8_HOST_ARCH_X64
-  sample->pc = reinterpret_cast<Address>(mcontext.cpu.rip);
-  sample->sp = reinterpret_cast<Address>(mcontext.cpu.rsp);
-  sample->fp = reinterpret_cast<Address>(mcontext.cpu.rbp);
-#elif V8_HOST_ARCH_ARM
-  sample->pc = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_PC]);
-  sample->sp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_SP]);
-  sample->fp = reinterpret_cast<Address>(mcontext.cpu.gpr[ARM_REG_FP]);
-#endif
-  sampler->SampleStack(sample);
-  sampler->Tick(sample);
-}
-
-
-class Sampler::PlatformData : public Malloced {
- public:
-  PlatformData() : vm_tid_(GetThreadID()) {}
-
-  int vm_tid() const { return vm_tid_; }
-
- private:
-  const int vm_tid_;
-};
-
-
-class SignalSender : public Thread {
- public:
-  enum SleepInterval {
-    HALF_INTERVAL,
-    FULL_INTERVAL
-  };
-
-  static const int kSignalSenderStackSize = 32 * KB;
-
-  explicit SignalSender(int interval)
-      : Thread("SignalSender"),
-        vm_tgid_(getpid()),
-        interval_(interval) {}
-
-  static void InstallSignalHandler() {
-    struct sigaction sa;
-    sa.sa_sigaction = ProfilerSignalHandler;
-    sigemptyset(&sa.sa_mask);
-    sa.sa_flags = SA_SIGINFO;
-    signal_handler_installed_ =
-        (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
-  }
-
-  static void RestoreSignalHandler() {
-    if (signal_handler_installed_) {
-      sigaction(SIGPROF, &old_signal_handler_, 0);
-      signal_handler_installed_ = false;
-    }
-  }
-
-  static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
-    SamplerRegistry::AddActiveSampler(sampler);
-    if (instance_ == NULL) {
-      // Start a thread that will send SIGPROF signal to VM threads,
-      // when CPU profiling will be enabled.
-      instance_ = new SignalSender(sampler->interval());
-      instance_->Start();
-    } else {
-      ASSERT(instance_->interval_ == sampler->interval());
-    }
-  }
-
-  static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_);
-    SamplerRegistry::RemoveActiveSampler(sampler);
-    if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
-      RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
-      delete instance_;
-      instance_ = NULL;
-      RestoreSignalHandler();
-    }
-  }
-
-  // Implement Thread::Run().
-  virtual void Run() {
-    SamplerRegistry::State state;
-    while ((state = SamplerRegistry::GetState()) !=
-           SamplerRegistry::HAS_NO_SAMPLERS) {
-      bool cpu_profiling_enabled =
-          (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
-      bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
-      if (cpu_profiling_enabled && !signal_handler_installed_) {
-        InstallSignalHandler();
-      } else if (!cpu_profiling_enabled && signal_handler_installed_) {
-        RestoreSignalHandler();
-      }
-      // When CPU profiling is enabled both JavaScript and C++ code is
-      // profiled. We must not suspend.
-      if (!cpu_profiling_enabled) {
-        if (rate_limiter_.SuspendIfNecessary()) continue;
-      }
-      if (cpu_profiling_enabled && runtime_profiler_enabled) {
-        if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
-          return;
-        }
-        Sleep(HALF_INTERVAL);
-        if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
-          return;
-        }
-        Sleep(HALF_INTERVAL);
-      } else {
-        if (cpu_profiling_enabled) {
-          if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
-                                                      this)) {
-            return;
-          }
-        }
-        if (runtime_profiler_enabled) {
-          if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
-                                                      NULL)) {
-            return;
-          }
-        }
-        Sleep(FULL_INTERVAL);
-      }
-    }
-  }
-
-  static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
-    if (!sampler->IsProfiling()) return;
-    SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
-    sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
-  }
-
-  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
-    if (!sampler->isolate()->IsInitialized()) return;
-    sampler->isolate()->runtime_profiler()->NotifyTick();
-  }
-
-  void SendProfilingSignal(int tid) {
-    if (!signal_handler_installed_) return;
-    pthread_kill(tid, SIGPROF);
-  }
-
-  void Sleep(SleepInterval full_or_half) {
-    // Convert ms to us and subtract 100 us to compensate for delays
-    // occurring during signal delivery.
-    useconds_t interval = interval_ * 1000 - 100;
-    if (full_or_half == HALF_INTERVAL) interval /= 2;
-    int result = usleep(interval);
-#ifdef DEBUG
-    if (result != 0 && errno != EINTR) {
-      fprintf(stderr,
-              "SignalSender usleep error; interval = %u, errno = %d\n",
-              interval,
-              errno);
-      ASSERT(result == 0 || errno == EINTR);
-    }
-#endif
-    USE(result);
-  }
-
-  const int vm_tgid_;
-  const int interval_;
-  RuntimeProfilerRateLimiter rate_limiter_;
-
-  // Protects the process wide state below.
-  static Mutex* mutex_;
-  static SignalSender* instance_;
-  static bool signal_handler_installed_;
-  static struct sigaction old_signal_handler_;
-
-  DISALLOW_COPY_AND_ASSIGN(SignalSender);
-};
-
-
-Mutex* SignalSender::mutex_ = OS::CreateMutex();
-SignalSender* SignalSender::instance_ = NULL;
-struct sigaction SignalSender::old_signal_handler_;
-bool SignalSender::signal_handler_installed_ = false;
-
-
-Sampler::Sampler(Isolate* isolate, int interval)
-    : isolate_(isolate),
-      interval_(interval),
-      profiling_(false),
-      active_(false),
-      samples_taken_(0) {
-  data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
-  ASSERT(!IsActive());
-  delete data_;
-}
-
-
-void Sampler::Start() {
-  ASSERT(!IsActive());
-  SetActive(true);
-  SignalSender::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
-  ASSERT(IsActive());
-  SignalSender::RemoveActiveSampler(this);
-  SetActive(false);
-}
-
-
-} }  // namespace v8::internal
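
platform-qnx.cc is removed wholesale in this update. Worth noting from the deleted code is the aligned-reservation trick its VirtualMemory constructor shared with the other POSIX ports: mmap size + alignment bytes of PROT_NONE address space, round the base up, then munmap the misaligned prefix and the unused suffix. The rounding step in isolation (assuming a power-of-two alignment):

    #include <stdint.h>

    // Round |base| up to the next multiple of |alignment|; this is the
    // RoundUp() step used by the removed VirtualMemory(size, alignment).
    static uintptr_t AlignUp(uintptr_t base, uintptr_t alignment) {
      return (base + alignment - 1) & ~(alignment - 1);
    }
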
index be8bbfc..4248ea2 100644 (file)
@@ -487,12 +487,10 @@ void Thread::set_name(const char* name) {
 
 
 void Thread::Start() {
-  pthread_attr_t* attr_ptr = NULL;
   pthread_attr_t attr;
   if (stack_size_ > 0) {
     pthread_attr_init(&attr);
     pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
-    attr_ptr = &attr;
   }
   pthread_create(&data_->thread_, NULL, ThreadEntry, this);
   ASSERT(data_->thread_ != kNoThread);
@@ -712,11 +710,8 @@ class SignalSender : public Thread {
       : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
         interval_(interval) {}
 
-  static void SetUp() {
-    if (!mutex_) {
-      mutex_ = OS::CreateMutex();
-    }
-  }
+  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
+  static void TearDown() { delete mutex_; }
 
   static void InstallSignalHandler() {
     struct sigaction sa;
@@ -870,6 +865,12 @@ void OS::SetUp() {
 }
 
 
+void OS::TearDown() {
+  SignalSender::TearDown();
+  delete limit_mutex;
+}
+
+
 Sampler::Sampler(Isolate* isolate, int interval)
     : isolate_(isolate),
       interval_(interval),
index aa16c85..2473949 100644 (file)
@@ -1848,14 +1848,26 @@ bool Win32Socket::Shutdown() {
 
 
 int Win32Socket::Send(const char* data, int len) const {
-  int status = send(socket_, data, len, 0);
-  return status;
+  if (len <= 0) return 0;
+  int written = 0;
+  while (written < len) {
+    int status = send(socket_, data + written, len - written, 0);
+    if (status == 0) {
+      break;
+    } else if (status > 0) {
+      written += status;
+    } else {
+      return 0;
+    }
+  }
+  return written;
 }
 
 
 int Win32Socket::Receive(char* data, int len) const {
+  if (len <= 0) return 0;
   int status = recv(socket_, data, len, 0);
-  return status;
+  return (status == SOCKET_ERROR) ? 0 : status;
 }
 
 
@@ -1949,11 +1961,8 @@ class SamplerThread : public Thread {
       : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
         interval_(interval) {}
 
-  static void SetUp() {
-    if (!mutex_) {
-      mutex_ = OS::CreateMutex();
-    }
-  }
+  static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
+  static void TearDown() { delete mutex_; }
 
   static void AddActiveSampler(Sampler* sampler) {
     ScopedLock lock(mutex_);
@@ -2078,6 +2087,12 @@ void OS::SetUp() {
 }
 
 
+void OS::TearDown() {
+  SamplerThread::TearDown();
+  delete limit_mutex;
+}
+
+
 Sampler::Sampler(Isolate* isolate, int interval)
     : isolate_(isolate),
       interval_(interval),
index 3b2aa3c..a2ddf7a 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -123,6 +123,9 @@ class OS {
   // called after CPU initialization.
   static void PostSetUp();
 
+  // Clean up platform-OS-related things. Called once at VM shutdown.
+  static void TearDown();
+
   // Returns the accumulated user time for thread. This routine
   // can be used for profiling. The implementation should
   // strive for high-precision timer resolution, preferable
@@ -650,6 +653,7 @@ class Socket {
   virtual bool Shutdown() = 0;
 
   // Data Transmission
+  // Return 0 on failure.
   virtual int Send(const char* data, int len) const = 0;
   virtual int Receive(char* data, int len) const = 0;
 
index 20d3b9c..0c17eec 100644 (file)
@@ -581,9 +581,8 @@ PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
   ParseExpression(true, CHECK_OK);
   Expect(i::Token::RPAREN, CHECK_OK);
 
-  scope_->EnterWith();
+  Scope::InsideWith iw(scope_);
   ParseStatement(CHECK_OK);
-  scope_->LeaveWith();
   return Statement::Default();
 }
 
@@ -749,10 +748,9 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
       return Statement::Default();
     }
     Expect(i::Token::RPAREN, CHECK_OK);
-    scope_->EnterWith();
-    ParseBlock(ok);
-    scope_->LeaveWith();
-    if (!*ok) Statement::Default();
+    { Scope::InsideWith iw(scope_);
+      ParseBlock(CHECK_OK);
+    }
     catch_or_finally_seen = true;
   }
   if (peek() == i::Token::FINALLY) {
index f3a4347..13261f7 100644 (file)
@@ -470,8 +470,19 @@ class PreParser {
     void set_language_mode(i::LanguageMode language_mode) {
       language_mode_ = language_mode;
     }
-    void EnterWith() { with_nesting_count_++; }
-    void LeaveWith() { with_nesting_count_--; }
+
+    class InsideWith {
+     public:
+      explicit InsideWith(Scope* scope) : scope_(scope) {
+        scope->with_nesting_count_++;
+      }
+
+      ~InsideWith() { scope_->with_nesting_count_--; }
+
+     private:
+      Scope* scope_;
+      DISALLOW_COPY_AND_ASSIGN(InsideWith);
+    };
 
    private:
     Scope** const variable_;
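
Replacing the EnterWith()/LeaveWith() pair with a scoped InsideWith object keeps the nesting counter balanced across every exit path, including the early returns hidden inside CHECK_OK in the preparser hunks above. The idiom in miniature (CountGuard is a hypothetical stand-in for InsideWith):

    class CountGuard {
     public:
      explicit CountGuard(int* counter) : counter_(counter) { ++*counter_; }
      ~CountGuard() { --*counter_; }
     private:
      int* counter_;
      CountGuard(const CountGuard&);       // not copyable, in the spirit of
      void operator=(const CountGuard&);   // DISALLOW_COPY_AND_ASSIGN
    };

    int with_nesting_count = 0;

    void ParseSomething() {
      CountGuard guard(&with_nesting_count);  // incremented on entry
      // ... any early return still runs the destructor ...
    }                                         // decremented on every exit
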
index 16eb85a..0d8dadc 100644 (file)
@@ -672,9 +672,6 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info,
     EmbeddedVector<char, 256> buf;
     int pos = OS::SNPrintF(buf, "%s (mode = %s", info,
                            Variable::Mode2String(var->mode()));
-    if (var->is_qml_global()) {
-      pos += OS::SNPrintF(buf + pos, ":QML");
-    }
     OS::SNPrintF(buf + pos, ")");
     PrintLiteralIndented(buf.start(), value, true);
   }
index 65369be..9afc52f 100644 (file)
@@ -95,6 +95,55 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
 }
 
 
+HeapEntry* HeapGraphEdge::from() const {
+  return &snapshot()->entries()[from_index_];
+}
+
+
+HeapSnapshot* HeapGraphEdge::snapshot() const {
+  return to_entry_->snapshot();
+}
+
+
+int HeapEntry::index() const {
+  return static_cast<int>(this - &snapshot_->entries().first());
+}
+
+
+int HeapEntry::set_children_index(int index) {
+  children_index_ = index;
+  int next_index = index + children_count_;
+  children_count_ = 0;
+  return next_index;
+}
+
+
+int HeapEntry::set_retainers_index(int index) {
+  retainers_index_ = index;
+  int next_index = index + retainers_count_;
+  retainers_count_ = 0;
+  return next_index;
+}
+
+
+HeapGraphEdge** HeapEntry::children_arr() {
+  ASSERT(children_index_ >= 0);
+  return &snapshot_->children()[children_index_];
+}
+
+
+HeapGraphEdge** HeapEntry::retainers_arr() {
+  ASSERT(retainers_index_ >= 0);
+  return &snapshot_->retainers()[retainers_index_];
+}
+
+
+HeapEntry* HeapEntry::dominator() const {
+  ASSERT(dominator_ >= 0);
+  return &snapshot_->entries()[dominator_];
+}
+
+
 SnapshotObjectId HeapObjectsMap::GetNthGcSubrootId(int delta) {
   return kGcRootsFirstSubrootId + delta * kObjectIdStep;
 }
index 9bea610..da2a969 100644 (file)
@@ -34,6 +34,7 @@
 #include "scopeinfo.h"
 #include "unicode.h"
 #include "zone-inl.h"
+#include "debug.h"
 
 namespace v8 {
 namespace internal {
@@ -930,82 +931,71 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
 }
 
 
-void HeapGraphEdge::Init(
-    int child_index, Type type, const char* name, HeapEntry* to) {
+HeapGraphEdge::HeapGraphEdge(Type type, const char* name, int from, int to)
+    : type_(type),
+      from_index_(from),
+      to_index_(to),
+      name_(name) {
   ASSERT(type == kContextVariable
-         || type == kProperty
-         || type == kInternal
-         || type == kShortcut);
-  child_index_ = child_index;
-  type_ = type;
-  name_ = name;
-  to_ = to;
+      || type == kProperty
+      || type == kInternal
+      || type == kShortcut);
 }
 
 
-void HeapGraphEdge::Init(int child_index, Type type, int index, HeapEntry* to) {
+HeapGraphEdge::HeapGraphEdge(Type type, int index, int from, int to)
+    : type_(type),
+      from_index_(from),
+      to_index_(to),
+      index_(index) {
   ASSERT(type == kElement || type == kHidden || type == kWeak);
-  child_index_ = child_index;
-  type_ = type;
-  index_ = index;
-  to_ = to;
 }
 
 
-void HeapGraphEdge::Init(int child_index, int index, HeapEntry* to) {
-  Init(child_index, kElement, index, to);
+void HeapGraphEdge::ReplaceToIndexWithEntry(HeapSnapshot* snapshot) {
+  to_entry_ = &snapshot->entries()[to_index_];
 }
 
 
-HeapEntry* HeapGraphEdge::From() {
-  return reinterpret_cast<HeapEntry*>(this - child_index_) - 1;
-}
-
+const int HeapEntry::kNoEntry = -1;
 
-void HeapEntry::Init(HeapSnapshot* snapshot,
+HeapEntry::HeapEntry(HeapSnapshot* snapshot,
                      Type type,
                      const char* name,
                      SnapshotObjectId id,
-                     int self_size,
-                     int children_count,
-                     int retainers_count) {
-  snapshot_ = snapshot;
-  type_ = type;
-  painted_ = false;
-  name_ = name;
-  self_size_ = self_size;
-  retained_size_ = 0;
-  entry_index_ = -1;
-  children_count_ = children_count;
-  retainers_count_ = retainers_count;
-  dominator_ = NULL;
-  id_ = id;
-}
+                     int self_size)
+    : painted_(false),
+      user_reachable_(false),
+      dominator_(kNoEntry),
+      type_(type),
+      retainers_count_(0),
+      retainers_index_(-1),
+      children_count_(0),
+      children_index_(-1),
+      self_size_(self_size),
+      retained_size_(0),
+      id_(id),
+      snapshot_(snapshot),
+      name_(name) { }
 
 
 void HeapEntry::SetNamedReference(HeapGraphEdge::Type type,
-                                  int child_index,
                                   const char* name,
-                                  HeapEntry* entry,
-                                  int retainer_index) {
-  children()[child_index].Init(child_index, type, name, entry);
-  entry->retainers()[retainer_index] = children_arr() + child_index;
+                                  HeapEntry* entry) {
+  HeapGraphEdge edge(type, name, this->index(), entry->index());
+  snapshot_->edges().Add(edge);
+  ++children_count_;
+  ++entry->retainers_count_;
 }
 
 
 void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
-                                    int child_index,
                                     int index,
-                                    HeapEntry* entry,
-                                    int retainer_index) {
-  children()[child_index].Init(child_index, type, index, entry);
-  entry->retainers()[retainer_index] = children_arr() + child_index;
-}
-
-
-void HeapEntry::SetUnidirElementReference(
-    int child_index, int index, HeapEntry* entry) {
-  children()[child_index].Init(child_index, index, entry);
+                                    HeapEntry* entry) {
+  HeapGraphEdge edge(type, index, this->index(), entry->index());
+  snapshot_->edges().Add(edge);
+  ++children_count_;
+  ++entry->retainers_count_;
 }
 
 
@@ -1016,7 +1006,8 @@ Handle<HeapObject> HeapEntry::GetHeapObject() {
 
 void HeapEntry::Print(
     const char* prefix, const char* edge_name, int max_depth, int indent) {
-  OS::Print("%6d %7d @%6llu %*c %s%s: ",
+  STATIC_CHECK(sizeof(unsigned) == sizeof(id()));
+  OS::Print("%6d %7d @%6u %*c %s%s: ",
             self_size(), retained_size(), id(),
             indent, ' ', prefix, edge_name);
   if (type() != kString) {
@@ -1034,9 +1025,9 @@ void HeapEntry::Print(
     OS::Print("\"\n");
   }
   if (--max_depth == 0) return;
-  Vector<HeapGraphEdge> ch = children();
+  Vector<HeapGraphEdge*> ch = children();
   for (int i = 0; i < ch.length(); ++i) {
-    HeapGraphEdge& edge = ch[i];
+    HeapGraphEdge& edge = *ch[i];
     const char* edge_prefix = "";
     EmbeddedVector<char, 64> index;
     const char* edge_name = index.start();
@@ -1092,15 +1083,6 @@ const char* HeapEntry::TypeAsString() {
 }
 
 
-size_t HeapEntry::EntriesSize(int entries_count,
-                              int children_count,
-                              int retainers_count) {
-  return sizeof(HeapEntry) * entries_count         // NOLINT
-      + sizeof(HeapGraphEdge) * children_count     // NOLINT
-      + sizeof(HeapGraphEdge*) * retainers_count;  // NOLINT
-}
-
-
 // It is very important to keep objects that form a heap snapshot
 // as small as possible.
 namespace {  // Avoid littering the global namespace.
@@ -1109,7 +1091,7 @@ template <size_t ptr_size> struct SnapshotSizeConstants;
 
 template <> struct SnapshotSizeConstants<4> {
   static const int kExpectedHeapGraphEdgeSize = 12;
-  static const int kExpectedHeapEntrySize = 36;
+  static const int kExpectedHeapEntrySize = 40;
   static const size_t kMaxSerializableSnapshotRawSize = 256 * MB;
 };
 
@@ -1130,10 +1112,9 @@ HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
       type_(type),
       title_(title),
       uid_(uid),
-      root_entry_(NULL),
-      gc_roots_entry_(NULL),
-      natives_root_entry_(NULL),
-      raw_entries_(NULL),
+      root_index_(HeapEntry::kNoEntry),
+      gc_roots_index_(HeapEntry::kNoEntry),
+      natives_root_index_(HeapEntry::kNoEntry),
       max_snapshot_js_object_id_(0) {
   STATIC_CHECK(
       sizeof(HeapGraphEdge) ==
@@ -1142,16 +1123,11 @@ HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
       sizeof(HeapEntry) ==
       SnapshotSizeConstants<kPointerSize>::kExpectedHeapEntrySize);
   for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
-    gc_subroot_entries_[i] = NULL;
+    gc_subroot_indexes_[i] = HeapEntry::kNoEntry;
   }
 }
 
 
-HeapSnapshot::~HeapSnapshot() {
-  DeleteArray(raw_entries_);
-}
-
-
 void HeapSnapshot::Delete() {
   collection_->RemoveSnapshot(this);
   delete this;
@@ -1163,18 +1139,8 @@ void HeapSnapshot::RememberLastJSObjectId() {
 }
 
 
-void HeapSnapshot::AllocateEntries(int entries_count,
-                                   int children_count,
-                                   int retainers_count) {
-  ASSERT(raw_entries_ == NULL);
-  raw_entries_size_ =
-      HeapEntry::EntriesSize(entries_count, children_count, retainers_count);
-  raw_entries_ = NewArray<char>(raw_entries_size_);
-}
-
-
-static void HeapEntryClearPaint(HeapEntry** entry_ptr) {
-  (*entry_ptr)->clear_paint();
+static void HeapEntryClearPaint(HeapEntry* entry_ptr) {
+  entry_ptr->clear_paint();
 }
 
 
@@ -1183,97 +1149,102 @@ void HeapSnapshot::ClearPaint() {
 }
 
 
-HeapEntry* HeapSnapshot::AddRootEntry(int children_count) {
-  ASSERT(root_entry_ == NULL);
+HeapEntry* HeapSnapshot::AddRootEntry() {
+  ASSERT(root_index_ == HeapEntry::kNoEntry);
   ASSERT(entries_.is_empty());  // Root entry must be the first one.
-  return (root_entry_ = AddEntry(HeapEntry::kObject,
-                                 "",
-                                 HeapObjectsMap::kInternalRootObjectId,
-                                 0,
-                                 children_count,
-                                 0));
+  HeapEntry* entry = AddEntry(HeapEntry::kObject,
+                              "",
+                              HeapObjectsMap::kInternalRootObjectId,
+                              0);
+  root_index_ = entry->index();
+  ASSERT(root_index_ == 0);
+  return entry;
 }
 
 
-HeapEntry* HeapSnapshot::AddGcRootsEntry(int children_count,
-                                         int retainers_count) {
-  ASSERT(gc_roots_entry_ == NULL);
-  return (gc_roots_entry_ = AddEntry(HeapEntry::kObject,
-                                     "(GC roots)",
-                                     HeapObjectsMap::kGcRootsObjectId,
-                                     0,
-                                     children_count,
-                                     retainers_count));
+HeapEntry* HeapSnapshot::AddGcRootsEntry() {
+  ASSERT(gc_roots_index_ == HeapEntry::kNoEntry);
+  HeapEntry* entry = AddEntry(HeapEntry::kObject,
+                              "(GC roots)",
+                              HeapObjectsMap::kGcRootsObjectId,
+                              0);
+  gc_roots_index_ = entry->index();
+  return entry;
 }
 
 
-HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag,
-                                           int children_count,
-                                           int retainers_count) {
-  ASSERT(gc_subroot_entries_[tag] == NULL);
+HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
+  ASSERT(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry);
   ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
-  return (gc_subroot_entries_[tag] = AddEntry(
+  HeapEntry* entry = AddEntry(
       HeapEntry::kObject,
       VisitorSynchronization::kTagNames[tag],
       HeapObjectsMap::GetNthGcSubrootId(tag),
-      0,
-      children_count,
-      retainers_count));
+      0);
+  gc_subroot_indexes_[tag] = entry->index();
+  return entry;
 }
 
 
 HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
                                   const char* name,
                                   SnapshotObjectId id,
-                                  int size,
-                                  int children_count,
-                                  int retainers_count) {
-  HeapEntry* entry = GetNextEntryToInit();
-  entry->Init(this, type, name, id, size, children_count, retainers_count);
-  return entry;
+                                  int size) {
+  HeapEntry entry(this, type, name, id, size);
+  entries_.Add(entry);
+  return &entries_.last();
+}
+
+
+void HeapSnapshot::FillChildrenAndRetainers() {
+  ASSERT(children().is_empty());
+  children().Allocate(edges().length());
+  ASSERT(retainers().is_empty());
+  retainers().Allocate(edges().length());
+  int children_index = 0;
+  int retainers_index = 0;
+  for (int i = 0; i < entries().length(); ++i) {
+    HeapEntry* entry = &entries()[i];
+    children_index = entry->set_children_index(children_index);
+    retainers_index = entry->set_retainers_index(retainers_index);
+  }
+  ASSERT(edges().length() == children_index);
+  ASSERT(edges().length() == retainers_index);
+  for (int i = 0; i < edges().length(); ++i) {
+    HeapGraphEdge* edge = &edges()[i];
+    edge->ReplaceToIndexWithEntry(this);
+    edge->from()->add_child(edge);
+    edge->to()->add_retainer(edge);
+  }
 }
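
FillChildrenAndRetainers lays the edges out flat in two passes: the first turns per-entry counts into start offsets into the shared children/retainers arrays (set_children_index returns the next free slot while zeroing the count for reuse as a cursor), and the second walks the edges and drops each pointer into its owner's slice. The same counting-sort-style offset pass in isolation, over assumed simplified types:

    #include <vector>

    // Convert per-node counts into start offsets into one flat array; a
    // second pass (not shown) places each item at its node's cursor.
    static void LayoutSlices(const std::vector<int>& counts,
                             std::vector<int>* starts) {
      starts->resize(counts.size());
      int offset = 0;
      for (size_t i = 0; i < counts.size(); ++i) {
        (*starts)[i] = offset;  // node i's slice begins here
        offset += counts[i];    // the next slice starts after it
      }
    }
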
 
 
 void HeapSnapshot::SetDominatorsToSelf() {
   for (int i = 0; i < entries_.length(); ++i) {
-    HeapEntry* entry = entries_[i];
-    if (entry->dominator() == NULL) entry->set_dominator(entry);
+    entries_[i].set_dominator(&entries_[i]);
   }
 }
 
 
-HeapEntry* HeapSnapshot::GetNextEntryToInit() {
-  if (entries_.length() > 0) {
-    HeapEntry* last_entry = entries_.last();
-    entries_.Add(reinterpret_cast<HeapEntry*>(
-        reinterpret_cast<char*>(last_entry) + last_entry->EntrySize()));
-  } else {
-    entries_.Add(reinterpret_cast<HeapEntry*>(raw_entries_));
+class FindEntryById {
+ public:
+  explicit FindEntryById(SnapshotObjectId id) : id_(id) { }
+  int operator()(HeapEntry* const* entry) {
+    if ((*entry)->id() == id_) return 0;
+    return (*entry)->id() < id_ ? -1 : 1;
   }
-  ASSERT(reinterpret_cast<char*>(entries_.last()) <
-         (raw_entries_ + raw_entries_size_));
-  return entries_.last();
-}
+ private:
+  SnapshotObjectId id_;
+};
 
 
 HeapEntry* HeapSnapshot::GetEntryById(SnapshotObjectId id) {
   List<HeapEntry*>* entries_by_id = GetSortedEntriesList();
-
   // Perform a binary search by id.
-  int low = 0;
-  int high = entries_by_id->length() - 1;
-  while (low <= high) {
-    int mid =
-        (static_cast<unsigned int>(low) + static_cast<unsigned int>(high)) >> 1;
-    SnapshotObjectId mid_id = entries_by_id->at(mid)->id();
-    if (mid_id > id)
-      high = mid - 1;
-    else if (mid_id < id)
-      low = mid + 1;
-    else
-      return entries_by_id->at(mid);
-  }
-  return NULL;
+  int index = SortedListBSearch(*entries_by_id, FindEntryById(id));
+  if (index == -1)
+    return NULL;
+  return entries_by_id->at(index);
 }
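GetEntryById now delegates the hand-rolled loop to SortedListBSearch, parameterized by a three-way functor: FindEntryById returns -1 when the probed element's id is below the target, 0 on a match, and 1 when it is above. A generic sketch of a search loop consuming such a functor (plain C++, hypothetical names; note the functor receives a pointer to the list element, matching HeapEntry* const* above):

    #include <vector>

    // Returns the index of the matching element, or -1 if absent.
    template <typename T, typename ThreeWay>
    int SortedVectorBSearch(const std::vector<T>& list, ThreeWay cmp) {
      int low = 0, high = static_cast<int>(list.size()) - 1;
      while (low <= high) {
        int mid = low + (high - low) / 2;
        int result = cmp(&list[mid]);  // -1: probe below target, 1: above
        if (result == 0) return mid;
        if (result < 0) low = mid + 1; else high = mid - 1;
      }
      return -1;
    }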
 
 
@@ -1287,7 +1258,10 @@ static int SortByIds(const T* entry1_ptr,
 
 List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
   if (sorted_entries_.is_empty()) {
-    sorted_entries_.AddAll(entries_);
+    sorted_entries_.Allocate(entries_.length());
+    for (int i = 0; i < entries_.length(); ++i) {
+      sorted_entries_[i] = &entries_[i];
+    }
     sorted_entries_.Sort(SortByIds);
   }
   return &sorted_entries_;
@@ -1299,6 +1273,22 @@ void HeapSnapshot::Print(int max_depth) {
 }
 
 
+template<typename T, class P>
+static size_t GetMemoryUsedByList(const List<T, P>& list) {
+  return list.capacity() * sizeof(T);
+}
+
+
+size_t HeapSnapshot::RawSnapshotSize() const {
+  return
+      GetMemoryUsedByList(entries_) +
+      GetMemoryUsedByList(edges_) +
+      GetMemoryUsedByList(children_) +
+      GetMemoryUsedByList(retainers_) +
+      GetMemoryUsedByList(sorted_entries_);
+}
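RawSnapshotSize charges each list at capacity() * sizeof(T) rather than length() * sizeof(T): a list owns its whole allocated backing store whether or not every slot is currently in use. The same accounting for std::vector, as a short sketch (hypothetical name):

    #include <cstddef>
    #include <vector>

    // Bytes owned by the vector's backing store, used slots or not.
    template <typename T>
    std::size_t MemoryUsedBy(const std::vector<T>& v) {
      return v.capacity() * sizeof(T);
    }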
+
+
 // We split IDs on evens for embedder objects (see
 // HeapObjectsMap::GenerateId) and odds for native objects.
 const SnapshotObjectId HeapObjectsMap::kInternalRootObjectId = 1;
@@ -1311,96 +1301,166 @@ const SnapshotObjectId HeapObjectsMap::kFirstAvailableObjectId =
     VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
 
 HeapObjectsMap::HeapObjectsMap()
-    : initial_fill_mode_(true),
-      next_id_(kFirstAvailableObjectId),
-      entries_map_(AddressesMatch),
-      entries_(new List<EntryInfo>()) { }
-
-
-HeapObjectsMap::~HeapObjectsMap() {
-  delete entries_;
+    : next_id_(kFirstAvailableObjectId),
+      entries_map_(AddressesMatch) {
+  // This dummy element solves a problem with entries_map_.
+  // When we do a lookup in the HashMap we see no difference between two
+  // cases: the map has an entry with NULL as the value, or the map has
+  // just created a new entry on the fly with NULL as the default value.
+  // With this dummy element we have a guarantee that all entries_map_
+  // entries will have a value field greater than 0.
+  // This fact is used in the MoveObject method.
+  entries_.Add(EntryInfo(0, NULL, 0));
 }
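The dummy element makes a value of 0 unambiguous: every real entry stores a strictly positive index in the hash map, so a 0 value can only mean the lookup just created a fresh slot. A standalone sketch of the same sentinel pattern with std::unordered_map (hypothetical names; operator[] deliberately mirrors the create-on-lookup behavior the comment describes):

    #include <cstddef>
    #include <unordered_map>
    #include <vector>

    struct Info { int id; };

    class IndexMap {
     public:
      IndexMap() { infos_.push_back(Info{0}); }  // slot 0: reserved dummy
      void Add(void* key, Info info) {
        infos_.push_back(info);
        map_[key] = infos_.size() - 1;           // stored index always > 0
      }
      Info* Find(void* key) {
        std::size_t index = map_[key];           // absent key yields 0
        return index == 0 ? nullptr : &infos_[index];
      }
     private:
      std::unordered_map<void*, std::size_t> map_;
      std::vector<Info> infos_;
    };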
 
 
 void HeapObjectsMap::SnapshotGenerationFinished() {
-  initial_fill_mode_ = false;
   RemoveDeadEntries();
 }
 
 
-SnapshotObjectId HeapObjectsMap::FindObject(Address addr) {
-  if (!initial_fill_mode_) {
-    SnapshotObjectId existing = FindEntry(addr);
-    if (existing != 0) return existing;
-  }
-  SnapshotObjectId id = next_id_;
-  next_id_ += kObjectIdStep;
-  AddEntry(addr, id);
-  return id;
-}
-
-
 void HeapObjectsMap::MoveObject(Address from, Address to) {
+  ASSERT(to != NULL);
+  ASSERT(from != NULL);
   if (from == to) return;
-  HashMap::Entry* entry = entries_map_.Lookup(from, AddressHash(from), false);
-  if (entry != NULL) {
-    void* value = entry->value;
-    entries_map_.Remove(from, AddressHash(from));
-    if (to != NULL) {
-      entry = entries_map_.Lookup(to, AddressHash(to), true);
-      // We can have an entry at the new location, it is OK, as GC can overwrite
-      // dead objects with alive objects being moved.
-      entry->value = value;
-    }
+  void* from_value = entries_map_.Remove(from, AddressHash(from));
+  if (from_value == NULL) return;
+  int from_entry_info_index =
+      static_cast<int>(reinterpret_cast<intptr_t>(from_value));
+  entries_.at(from_entry_info_index).addr = to;
+  HashMap::Entry* to_entry = entries_map_.Lookup(to, AddressHash(to), true);
+  if (to_entry->value != NULL) {
+    int to_entry_info_index =
+        static_cast<int>(reinterpret_cast<intptr_t>(to_entry->value));
+    // Without this operation we will have two EntryInfos with the same
+    // value in the addr field. That is a problem, because RemoveDeadEntries
+    // would later remove one of these entries together with the
+    // corresponding entries_map_ entry.
+    entries_.at(to_entry_info_index).addr = NULL;
   }
+  to_entry->value = reinterpret_cast<void*>(from_entry_info_index);
 }
 
 
-void HeapObjectsMap::AddEntry(Address addr, SnapshotObjectId id) {
-  HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), true);
-  ASSERT(entry->value == NULL);
-  entry->value = reinterpret_cast<void*>(entries_->length());
-  entries_->Add(EntryInfo(id));
+SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
+  HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), false);
+  if (entry == NULL) return 0;
+  int entry_index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+  EntryInfo& entry_info = entries_.at(entry_index);
+  ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
+  return entry_info.id;
 }
 
 
-SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
-  HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), false);
-  if (entry != NULL) {
+SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
+                                                unsigned int size) {
+  ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
+  HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), true);
+  if (entry->value != NULL) {
     int entry_index =
         static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
-    EntryInfo& entry_info = entries_->at(entry_index);
+    EntryInfo& entry_info = entries_.at(entry_index);
     entry_info.accessed = true;
+    entry_info.size = size;
     return entry_info.id;
-  } else {
-    return 0;
   }
+  entry->value = reinterpret_cast<void*>(entries_.length());
+  SnapshotObjectId id = next_id_;
+  next_id_ += kObjectIdStep;
+  entries_.Add(EntryInfo(id, addr, size));
+  ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
+  return id;
+}
+
+
+void HeapObjectsMap::StopHeapObjectsTracking() {
+  time_intervals_.Clear();
+}
+
+void HeapObjectsMap::UpdateHeapObjectsMap() {
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                          "HeapSnapshotsCollection::UpdateHeapObjectsMap");
+  HeapIterator iterator;
+  for (HeapObject* obj = iterator.next();
+       obj != NULL;
+       obj = iterator.next()) {
+    FindOrAddEntry(obj->address(), obj->Size());
+  }
+  RemoveDeadEntries();
+}
+
+
+void HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream) {
+  UpdateHeapObjectsMap();
+  time_intervals_.Add(TimeInterval(next_id_));
+  int preferred_chunk_size = stream->GetChunkSize();
+  List<v8::HeapStatsUpdate> stats_buffer;
+  ASSERT(!entries_.is_empty());
+  EntryInfo* entry_info = &entries_.first();
+  EntryInfo* end_entry_info = &entries_.last() + 1;
+  for (int time_interval_index = 0;
+       time_interval_index < time_intervals_.length();
+       ++time_interval_index) {
+    TimeInterval& time_interval = time_intervals_[time_interval_index];
+    SnapshotObjectId time_interval_id = time_interval.id;
+    uint32_t entries_size = 0;
+    EntryInfo* start_entry_info = entry_info;
+    while (entry_info < end_entry_info && entry_info->id < time_interval_id) {
+      entries_size += entry_info->size;
+      ++entry_info;
+    }
+    uint32_t entries_count =
+        static_cast<uint32_t>(entry_info - start_entry_info);
+    if (time_interval.count != entries_count ||
+        time_interval.size != entries_size) {
+      stats_buffer.Add(v8::HeapStatsUpdate(
+          time_interval_index,
+          time_interval.count = entries_count,
+          time_interval.size = entries_size));
+      if (stats_buffer.length() >= preferred_chunk_size) {
+        OutputStream::WriteResult result = stream->WriteHeapStatsChunk(
+            &stats_buffer.first(), stats_buffer.length());
+        if (result == OutputStream::kAbort) return;
+        stats_buffer.Clear();
+      }
+    }
+  }
+  ASSERT(entry_info == end_entry_info);
+  if (!stats_buffer.is_empty()) {
+    OutputStream::WriteResult result = stream->WriteHeapStatsChunk(
+        &stats_buffer.first(), stats_buffer.length());
+    if (result == OutputStream::kAbort) return;
+  }
+  stream->EndOfStream();
 }
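PushHeapObjectsStats depends on entries_ being ordered by id: ids are handed out monotonically and RemoveDeadEntries preserves order, so a single cursor sweep partitions the live objects among the recorded time intervals, and only intervals whose count or size changed emit an update. A standalone sketch of that one-pass diff, with plain structs standing in for EntryInfo, TimeInterval and v8::HeapStatsUpdate (all names hypothetical):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct LiveObject { uint32_t id, size; };           // sorted by id
    struct Interval { uint32_t end_id, count, size; };  // end_id ascending
    struct Update { int index; uint32_t count, size; };

    std::vector<Update> DiffStats(const std::vector<LiveObject>& objects,
                                  std::vector<Interval>* intervals) {
      std::vector<Update> updates;
      std::size_t i = 0;  // single cursor over the id-sorted objects
      for (int t = 0; t < static_cast<int>(intervals->size()); ++t) {
        Interval& iv = (*intervals)[t];
        uint32_t count = 0, size = 0;
        while (i < objects.size() && objects[i].id < iv.end_id) {
          size += objects[i].size;
          ++count;
          ++i;
        }
        if (iv.count != count || iv.size != size) {  // emit deltas only
          iv.count = count;
          iv.size = size;
          updates.push_back(Update{t, count, size});
        }
      }
      return updates;
    }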
 
 
 void HeapObjectsMap::RemoveDeadEntries() {
-  List<EntryInfo>* new_entries = new List<EntryInfo>();
-  List<void*> dead_entries;
-  for (HashMap::Entry* entry = entries_map_.Start();
-       entry != NULL;
-       entry = entries_map_.Next(entry)) {
-    int entry_index =
-        static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
-    EntryInfo& entry_info = entries_->at(entry_index);
+  ASSERT(entries_.length() > 0 &&
+         entries_.at(0).id == 0 &&
+         entries_.at(0).addr == NULL);
+  int first_free_entry = 1;
+  for (int i = 1; i < entries_.length(); ++i) {
+    EntryInfo& entry_info = entries_.at(i);
     if (entry_info.accessed) {
-      entry->value = reinterpret_cast<void*>(new_entries->length());
-      new_entries->Add(EntryInfo(entry_info.id, false));
+      if (first_free_entry != i) {
+        entries_.at(first_free_entry) = entry_info;
+      }
+      entries_.at(first_free_entry).accessed = false;
+      HashMap::Entry* entry = entries_map_.Lookup(
+          entry_info.addr, AddressHash(entry_info.addr), false);
+      ASSERT(entry);
+      entry->value = reinterpret_cast<void*>(first_free_entry);
+      ++first_free_entry;
     } else {
-      dead_entries.Add(entry->key);
+      if (entry_info.addr) {
+        entries_map_.Remove(entry_info.addr, AddressHash(entry_info.addr));
+      }
     }
   }
-  for (int i = 0; i < dead_entries.length(); ++i) {
-    void* raw_entry = dead_entries[i];
-    entries_map_.Remove(
-        raw_entry, AddressHash(reinterpret_cast<Address>(raw_entry)));
-  }
-  delete entries_;
-  entries_ = new_entries;
+  entries_.Rewind(first_free_entry);
+  ASSERT(static_cast<uint32_t>(entries_.length()) - 1 ==
+         entries_map_.occupancy());
 }
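RemoveDeadEntries is an in-place, order-preserving compaction: accessed entries slide left over dead ones, the hash map is repointed at each survivor's new slot, and mappings for dead addresses are dropped. A standalone sketch of the same compaction over std::vector and std::unordered_map (hypothetical names):

    #include <cstddef>
    #include <unordered_map>
    #include <vector>

    struct Rec { void* addr; bool accessed; };

    void Compact(std::vector<Rec>* recs,
                 std::unordered_map<void*, std::size_t>* index) {
      std::size_t first_free = 1;                 // slot 0 is the dummy
      for (std::size_t i = 1; i < recs->size(); ++i) {
        Rec& rec = (*recs)[i];
        if (rec.accessed) {
          if (first_free != i) (*recs)[first_free] = rec;  // slide left
          (*recs)[first_free].accessed = false;   // reset for next round
          (*index)[(*recs)[first_free].addr] = first_free;
          ++first_free;
        } else if (rec.addr != nullptr) {
          index->erase(rec.addr);                 // drop dead mapping
        }
      }
      recs->resize(first_free);                   // like List::Rewind
    }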
 
 
@@ -1487,7 +1547,7 @@ Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
   for (HeapObject* obj = iterator.next();
        obj != NULL;
        obj = iterator.next()) {
-    if (ids_.FindObject(obj->address()) == id) {
+    if (ids_.FindEntry(obj->address()) == id) {
       ASSERT(object == NULL);
       object = obj;
       // Can't break -- kFilterUnreachable requires full heap traversal.
@@ -1497,99 +1557,22 @@ Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
 }
 
 
-HeapEntry* const HeapEntriesMap::kHeapEntryPlaceholder =
-    reinterpret_cast<HeapEntry*>(1);
-
 HeapEntriesMap::HeapEntriesMap()
-    : entries_(HeapThingsMatch),
-      entries_count_(0),
-      total_children_count_(0),
-      total_retainers_count_(0) {
-}
-
-
-HeapEntriesMap::~HeapEntriesMap() {
-  for (HashMap::Entry* p = entries_.Start(); p != NULL; p = entries_.Next(p)) {
-    delete reinterpret_cast<EntryInfo*>(p->value);
-  }
+    : entries_(HeapThingsMatch) {
 }
 
 
-void HeapEntriesMap::AllocateHeapEntryForMapEntry(HashMap::Entry* map_entry) {
-    EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(map_entry->value);
-    entry_info->entry = entry_info->allocator->AllocateEntry(
-        map_entry->key,
-        entry_info->children_count,
-        entry_info->retainers_count);
-    ASSERT(entry_info->entry != NULL);
-    ASSERT(entry_info->entry != kHeapEntryPlaceholder);
-    entry_info->children_count = 0;
-    entry_info->retainers_count = 0;
-}
-
-
-void HeapEntriesMap::AllocateEntries(HeapThing root_object) {
-  HashMap::Entry* root_entry =
-      entries_.Lookup(root_object, Hash(root_object), false);
-  ASSERT(root_entry != NULL);
-  // Make sure root entry is allocated first.
-  AllocateHeapEntryForMapEntry(root_entry);
-  void* root_entry_value = root_entry->value;
-  // Remove the root object from map while iterating through other entries.
-  entries_.Remove(root_object, Hash(root_object));
-  root_entry = NULL;
-
-  for (HashMap::Entry* p = entries_.Start();
-       p != NULL;
-       p = entries_.Next(p)) {
-    AllocateHeapEntryForMapEntry(p);
-  }
-
-  // Insert root entry back.
-  root_entry = entries_.Lookup(root_object, Hash(root_object), true);
-  root_entry->value = root_entry_value;
-}
-
-
-HeapEntry* HeapEntriesMap::Map(HeapThing thing) {
+int HeapEntriesMap::Map(HeapThing thing) {
   HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), false);
-  if (cache_entry != NULL) {
-    EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(cache_entry->value);
-    return entry_info->entry;
-  } else {
-    return NULL;
-  }
+  if (cache_entry == NULL) return HeapEntry::kNoEntry;
+  return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
 }
 
 
-void HeapEntriesMap::Pair(
-    HeapThing thing, HeapEntriesAllocator* allocator, HeapEntry* entry) {
+void HeapEntriesMap::Pair(HeapThing thing, int entry) {
   HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), true);
   ASSERT(cache_entry->value == NULL);
-  cache_entry->value = new EntryInfo(entry, allocator);
-  ++entries_count_;
-}
-
-
-void HeapEntriesMap::CountReference(HeapThing from, HeapThing to,
-                                    int* prev_children_count,
-                                    int* prev_retainers_count) {
-  HashMap::Entry* from_cache_entry = entries_.Lookup(from, Hash(from), false);
-  HashMap::Entry* to_cache_entry = entries_.Lookup(to, Hash(to), false);
-  ASSERT(from_cache_entry != NULL);
-  ASSERT(to_cache_entry != NULL);
-  EntryInfo* from_entry_info =
-      reinterpret_cast<EntryInfo*>(from_cache_entry->value);
-  EntryInfo* to_entry_info =
-      reinterpret_cast<EntryInfo*>(to_cache_entry->value);
-  if (prev_children_count)
-    *prev_children_count = from_entry_info->children_count;
-  if (prev_retainers_count)
-    *prev_retainers_count = to_entry_info->retainers_count;
-  ++from_entry_info->children_count;
-  ++to_entry_info->retainers_count;
-  ++total_children_count_;
-  ++total_retainers_count_;
+  cache_entry->value = reinterpret_cast<void*>(static_cast<intptr_t>(entry));
 }
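Map and Pair pack a plain entry index into the hash map's void* value by round-tripping it through intptr_t, avoiding any per-entry heap allocation (the removed EntryInfo objects). A two-function sketch of the encoding (hypothetical names). Note that index 0 encodes as a null pointer, so any API that reports absence via a NULL value would need a reserved slot, which is exactly what HeapObjectsMap's dummy element provides:

    #include <cstdint>

    void* EncodeIndex(int entry) {
      return reinterpret_cast<void*>(static_cast<intptr_t>(entry));
    }

    int DecodeIndex(void* value) {
      return static_cast<int>(reinterpret_cast<intptr_t>(value));
    }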
 
 
@@ -1606,20 +1589,14 @@ void HeapObjectsSet::Clear() {
 bool HeapObjectsSet::Contains(Object* obj) {
   if (!obj->IsHeapObject()) return false;
   HeapObject* object = HeapObject::cast(obj);
-  HashMap::Entry* cache_entry =
-      entries_.Lookup(object, HeapEntriesMap::Hash(object), false);
-  return cache_entry != NULL;
+  return entries_.Lookup(object, HeapEntriesMap::Hash(object), false) != NULL;
 }
 
 
 void HeapObjectsSet::Insert(Object* obj) {
   if (!obj->IsHeapObject()) return;
   HeapObject* object = HeapObject::cast(obj);
-  HashMap::Entry* cache_entry =
-      entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
-  if (cache_entry->value == NULL) {
-    cache_entry->value = HeapEntriesMap::kHeapEntryPlaceholder;
-  }
+  entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
 }
 
 
@@ -1627,12 +1604,9 @@ const char* HeapObjectsSet::GetTag(Object* obj) {
   HeapObject* object = HeapObject::cast(obj);
   HashMap::Entry* cache_entry =
       entries_.Lookup(object, HeapEntriesMap::Hash(object), false);
-  if (cache_entry != NULL
-      && cache_entry->value != HeapEntriesMap::kHeapEntryPlaceholder) {
-    return reinterpret_cast<const char*>(cache_entry->value);
-  } else {
-    return NULL;
-  }
+  return cache_entry != NULL
+      ? reinterpret_cast<const char*>(cache_entry->value)
+      : NULL;
 }
 
 
@@ -1674,126 +1648,83 @@ V8HeapExplorer::~V8HeapExplorer() {
 }
 
 
-HeapEntry* V8HeapExplorer::AllocateEntry(
-    HeapThing ptr, int children_count, int retainers_count) {
-  return AddEntry(
-      reinterpret_cast<HeapObject*>(ptr), children_count, retainers_count);
+HeapEntry* V8HeapExplorer::AllocateEntry(HeapThing ptr) {
+  return AddEntry(reinterpret_cast<HeapObject*>(ptr));
 }
 
 
-HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
-                                    int children_count,
-                                    int retainers_count) {
+HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
   if (object == kInternalRootObject) {
-    ASSERT(retainers_count == 0);
-    return snapshot_->AddRootEntry(children_count);
+    snapshot_->AddRootEntry();
+    return snapshot_->root();
   } else if (object == kGcRootsObject) {
-    return snapshot_->AddGcRootsEntry(children_count, retainers_count);
+    HeapEntry* entry = snapshot_->AddGcRootsEntry();
+    return entry;
   } else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) {
-    return snapshot_->AddGcSubrootEntry(
-        GetGcSubrootOrder(object),
-        children_count,
-        retainers_count);
+    HeapEntry* entry = snapshot_->AddGcSubrootEntry(GetGcSubrootOrder(object));
+    return entry;
   } else if (object->IsJSFunction()) {
     JSFunction* func = JSFunction::cast(object);
     SharedFunctionInfo* shared = func->shared();
     const char* name = shared->bound() ? "native_bind" :
         collection_->names()->GetName(String::cast(shared->name()));
-    return AddEntry(object,
-                    HeapEntry::kClosure,
-                    name,
-                    children_count,
-                    retainers_count);
+    return AddEntry(object, HeapEntry::kClosure, name);
   } else if (object->IsJSRegExp()) {
     JSRegExp* re = JSRegExp::cast(object);
     return AddEntry(object,
                     HeapEntry::kRegExp,
-                    collection_->names()->GetName(re->Pattern()),
-                    children_count,
-                    retainers_count);
+                    collection_->names()->GetName(re->Pattern()));
   } else if (object->IsJSObject()) {
-    return AddEntry(object,
-                    HeapEntry::kObject,
-                    "",
-                    children_count,
-                    retainers_count);
+    const char* name = collection_->names()->GetName(
+        GetConstructorName(JSObject::cast(object)));
+    if (object->IsJSGlobalObject()) {
+      const char* tag = objects_tags_.GetTag(object);
+      if (tag != NULL) {
+        name = collection_->names()->GetFormatted("%s / %s", name, tag);
+      }
+    }
+    return AddEntry(object, HeapEntry::kObject, name);
   } else if (object->IsString()) {
     return AddEntry(object,
                     HeapEntry::kString,
-                    collection_->names()->GetName(String::cast(object)),
-                    children_count,
-                    retainers_count);
+                    collection_->names()->GetName(String::cast(object)));
   } else if (object->IsCode()) {
-    return AddEntry(object,
-                    HeapEntry::kCode,
-                    "",
-                    children_count,
-                    retainers_count);
+    return AddEntry(object, HeapEntry::kCode, "");
   } else if (object->IsSharedFunctionInfo()) {
-    SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
+    String* name = String::cast(SharedFunctionInfo::cast(object)->name());
     return AddEntry(object,
                     HeapEntry::kCode,
-                    collection_->names()->GetName(String::cast(shared->name())),
-                    children_count,
-                    retainers_count);
+                    collection_->names()->GetName(name));
   } else if (object->IsScript()) {
-    Script* script = Script::cast(object);
+    Object* name = Script::cast(object)->name();
     return AddEntry(object,
                     HeapEntry::kCode,
-                    script->name()->IsString() ?
-                        collection_->names()->GetName(
-                            String::cast(script->name()))
-                        : "",
-                    children_count,
-                    retainers_count);
+                    name->IsString()
+                        ? collection_->names()->GetName(String::cast(name))
+                        : "");
   } else if (object->IsGlobalContext()) {
-    return AddEntry(object,
-                    HeapEntry::kHidden,
-                    "system / GlobalContext",
-                    children_count,
-                    retainers_count);
+    return AddEntry(object, HeapEntry::kHidden, "system / GlobalContext");
   } else if (object->IsContext()) {
-    return AddEntry(object,
-                    HeapEntry::kHidden,
-                    "system / Context",
-                    children_count,
-                    retainers_count);
+    return AddEntry(object, HeapEntry::kHidden, "system / Context");
   } else if (object->IsFixedArray() ||
              object->IsFixedDoubleArray() ||
              object->IsByteArray() ||
              object->IsExternalArray()) {
-    const char* tag = objects_tags_.GetTag(object);
-    return AddEntry(object,
-                    HeapEntry::kArray,
-                    tag != NULL ? tag : "",
-                    children_count,
-                    retainers_count);
+    return AddEntry(object, HeapEntry::kArray, "");
   } else if (object->IsHeapNumber()) {
-    return AddEntry(object,
-                    HeapEntry::kHeapNumber,
-                    "number",
-                    children_count,
-                    retainers_count);
+    return AddEntry(object, HeapEntry::kHeapNumber, "number");
   }
-  return AddEntry(object,
-                  HeapEntry::kHidden,
-                  GetSystemEntryName(object),
-                  children_count,
-                  retainers_count);
+  return AddEntry(object, HeapEntry::kHidden, GetSystemEntryName(object));
 }
 
 
 HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
                                     HeapEntry::Type type,
-                                    const char* name,
-                                    int children_count,
-                                    int retainers_count) {
-  return snapshot_->AddEntry(type,
-                             name,
-                             collection_->GetObjectId(object->address()),
-                             object->Size(),
-                             children_count,
-                             retainers_count);
+                                    const char* name) {
+  int object_size = object->Size();
+  SnapshotObjectId object_id =
+    collection_->GetObjectId(object->address(), object_size);
+  return snapshot_->AddEntry(type, name, object_id, object_size);
 }
 
 
@@ -1862,10 +1793,10 @@ class IndexedReferencesExtractor : public ObjectVisitor {
  public:
   IndexedReferencesExtractor(V8HeapExplorer* generator,
                              HeapObject* parent_obj,
-                             HeapEntry* parent_entry)
+                             int parent)
       : generator_(generator),
         parent_obj_(parent_obj),
-        parent_(parent_entry),
+        parent_(parent),
         next_index_(1) {
   }
   void VisitPointers(Object** start, Object** end) {
@@ -1894,178 +1825,40 @@ class IndexedReferencesExtractor : public ObjectVisitor {
   }
   V8HeapExplorer* generator_;
   HeapObject* parent_obj_;
-  HeapEntry* parent_;
+  int parent_;
   int next_index_;
 };
 
 
 void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
-  HeapEntry* entry = GetEntry(obj);
-  if (entry == NULL) return;  // No interest in this object.
+  HeapEntry* heap_entry = GetEntry(obj);
+  if (heap_entry == NULL) return;  // No interest in this object.
+  int entry = heap_entry->index();
 
   bool extract_indexed_refs = true;
   if (obj->IsJSGlobalProxy()) {
-    // We need to reference JS global objects from snapshot's root.
-    // We use JSGlobalProxy because this is what embedder (e.g. browser)
-    // uses for the global object.
-    JSGlobalProxy* proxy = JSGlobalProxy::cast(obj);
-    SetRootShortcutReference(proxy->map()->prototype());
+    ExtractJSGlobalProxyReferences(JSGlobalProxy::cast(obj));
   } else if (obj->IsJSObject()) {
-    JSObject* js_obj = JSObject::cast(obj);
-    ExtractClosureReferences(js_obj, entry);
-    ExtractPropertyReferences(js_obj, entry);
-    ExtractElementReferences(js_obj, entry);
-    ExtractInternalReferences(js_obj, entry);
-    SetPropertyReference(
-        obj, entry, heap_->Proto_symbol(), js_obj->GetPrototype());
-    if (obj->IsJSFunction()) {
-      JSFunction* js_fun = JSFunction::cast(js_obj);
-      Object* proto_or_map = js_fun->prototype_or_initial_map();
-      if (!proto_or_map->IsTheHole()) {
-        if (!proto_or_map->IsMap()) {
-          SetPropertyReference(
-              obj, entry,
-              heap_->prototype_symbol(), proto_or_map,
-              NULL,
-              JSFunction::kPrototypeOrInitialMapOffset);
-        } else {
-          SetPropertyReference(
-              obj, entry,
-              heap_->prototype_symbol(), js_fun->prototype());
-        }
-      }
-      SharedFunctionInfo* shared_info = js_fun->shared();
-      // JSFunction has either bindings or literals and never both.
-      bool bound = shared_info->bound();
-      TagObject(js_fun->literals_or_bindings(),
-                bound ? "(function bindings)" : "(function literals)");
-      SetInternalReference(js_fun, entry,
-                           bound ? "bindings" : "literals",
-                           js_fun->literals_or_bindings(),
-                           JSFunction::kLiteralsOffset);
-      SetInternalReference(js_fun, entry,
-                           "shared", shared_info,
-                           JSFunction::kSharedFunctionInfoOffset);
-      TagObject(js_fun->unchecked_context(), "(context)");
-      SetInternalReference(js_fun, entry,
-                           "context", js_fun->unchecked_context(),
-                           JSFunction::kContextOffset);
-      for (int i = JSFunction::kNonWeakFieldsEndOffset;
-           i < JSFunction::kSize;
-           i += kPointerSize) {
-        SetWeakReference(js_fun, entry, i, *HeapObject::RawField(js_fun, i), i);
-      }
-    }
-    TagObject(js_obj->properties(), "(object properties)");
-    SetInternalReference(obj, entry,
-                         "properties", js_obj->properties(),
-                         JSObject::kPropertiesOffset);
-    TagObject(js_obj->elements(), "(object elements)");
-    SetInternalReference(obj, entry,
-                         "elements", js_obj->elements(),
-                         JSObject::kElementsOffset);
+    ExtractJSObjectReferences(entry, JSObject::cast(obj));
   } else if (obj->IsString()) {
-    if (obj->IsConsString()) {
-      ConsString* cs = ConsString::cast(obj);
-      SetInternalReference(obj, entry, 1, cs->first());
-      SetInternalReference(obj, entry, 2, cs->second());
-    }
-    if (obj->IsSlicedString()) {
-      SlicedString* ss = SlicedString::cast(obj);
-      SetInternalReference(obj, entry, "parent", ss->parent());
-    }
+    ExtractStringReferences(entry, String::cast(obj));
     extract_indexed_refs = false;
-  } else if (obj->IsGlobalContext()) {
-    Context* context = Context::cast(obj);
-    TagObject(context->jsfunction_result_caches(),
-              "(context func. result caches)");
-    TagObject(context->normalized_map_cache(), "(context norm. map cache)");
-    TagObject(context->runtime_context(), "(runtime context)");
-    TagObject(context->data(), "(context data)");
-    for (int i = Context::FIRST_WEAK_SLOT;
-         i < Context::GLOBAL_CONTEXT_SLOTS;
-         ++i) {
-      SetWeakReference(obj, entry,
-                       i, context->get(i),
-                       FixedArray::OffsetOfElementAt(i));
-    }
+  } else if (obj->IsContext()) {
+    ExtractContextReferences(entry, Context::cast(obj));
   } else if (obj->IsMap()) {
-    Map* map = Map::cast(obj);
-    SetInternalReference(obj, entry,
-                         "prototype", map->prototype(), Map::kPrototypeOffset);
-    SetInternalReference(obj, entry,
-                         "constructor", map->constructor(),
-                         Map::kConstructorOffset);
-    if (!map->instance_descriptors()->IsEmpty()) {
-      TagObject(map->instance_descriptors(), "(map descriptors)");
-      SetInternalReference(obj, entry,
-                           "descriptors", map->instance_descriptors(),
-                           Map::kInstanceDescriptorsOrBitField3Offset);
-    }
-    if (map->prototype_transitions() != heap_->empty_fixed_array()) {
-      TagObject(map->prototype_transitions(), "(prototype transitions)");
-      SetInternalReference(obj,
-                           entry,
-                           "prototype_transitions",
-                           map->prototype_transitions(),
-                           Map::kPrototypeTransitionsOffset);
-    }
-    SetInternalReference(obj, entry,
-                         "code_cache", map->code_cache(),
-                         Map::kCodeCacheOffset);
+    ExtractMapReferences(entry, Map::cast(obj));
   } else if (obj->IsSharedFunctionInfo()) {
-    SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
-    SetInternalReference(obj, entry,
-                         "name", shared->name(),
-                         SharedFunctionInfo::kNameOffset);
-    SetInternalReference(obj, entry,
-                         "code", shared->unchecked_code(),
-                         SharedFunctionInfo::kCodeOffset);
-    TagObject(shared->scope_info(), "(function scope info)");
-    SetInternalReference(obj, entry,
-                         "scope_info", shared->scope_info(),
-                         SharedFunctionInfo::kScopeInfoOffset);
-    SetInternalReference(obj, entry,
-                         "instance_class_name", shared->instance_class_name(),
-                         SharedFunctionInfo::kInstanceClassNameOffset);
-    SetInternalReference(obj, entry,
-                         "script", shared->script(),
-                         SharedFunctionInfo::kScriptOffset);
-    SetWeakReference(obj, entry,
-                     1, shared->initial_map(),
-                     SharedFunctionInfo::kInitialMapOffset);
+    ExtractSharedFunctionInfoReferences(entry, SharedFunctionInfo::cast(obj));
   } else if (obj->IsScript()) {
-    Script* script = Script::cast(obj);
-    SetInternalReference(obj, entry,
-                         "source", script->source(),
-                         Script::kSourceOffset);
-    SetInternalReference(obj, entry,
-                         "name", script->name(),
-                         Script::kNameOffset);
-    SetInternalReference(obj, entry,
-                         "data", script->data(),
-                         Script::kDataOffset);
-    SetInternalReference(obj, entry,
-                         "context_data", script->context_data(),
-                         Script::kContextOffset);
-    TagObject(script->line_ends(), "(script line ends)");
-    SetInternalReference(obj, entry,
-                         "line_ends", script->line_ends(),
-                         Script::kLineEndsOffset);
+    ExtractScriptReferences(entry, Script::cast(obj));
   } else if (obj->IsCodeCache()) {
-    CodeCache* code_cache = CodeCache::cast(obj);
-    TagObject(code_cache->default_cache(), "(default code cache)");
-    SetInternalReference(obj, entry,
-                         "default_cache", code_cache->default_cache(),
-                         CodeCache::kDefaultCacheOffset);
-    TagObject(code_cache->normal_type_cache(), "(code type cache)");
-    SetInternalReference(obj, entry,
-                         "type_cache", code_cache->normal_type_cache(),
-                         CodeCache::kNormalTypeCacheOffset);
+    ExtractCodeCacheReferences(entry, CodeCache::cast(obj));
   } else if (obj->IsCode()) {
-    Code* code = Code::cast(obj);
-    TagObject(code->unchecked_relocation_info(), "(code relocation info)");
-    TagObject(code->unchecked_deoptimization_data(), "(code deopt data)");
+    ExtractCodeReferences(entry, Code::cast(obj));
+  } else if (obj->IsJSGlobalPropertyCell()) {
+    ExtractJSGlobalPropertyCellReferences(
+        entry, JSGlobalPropertyCell::cast(obj));
+    extract_indexed_refs = false;
   }
   if (extract_indexed_refs) {
     SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
@@ -2075,14 +1868,266 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
 }
 
 
-void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj,
-                                              HeapEntry* entry) {
+void V8HeapExplorer::ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy) {
+  // We need to reference JS global objects from the snapshot's root.
+  // We use JSGlobalProxy because this is what the embedder (e.g. a browser)
+  // uses for the global object.
+  Object* object = proxy->map()->prototype();
+  bool is_debug_object = false;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  is_debug_object = object->IsGlobalObject() &&
+      Isolate::Current()->debug()->IsDebugGlobal(GlobalObject::cast(object));
+#endif
+  if (!is_debug_object) {
+    SetUserGlobalReference(object);
+  }
+}
+
+
+void V8HeapExplorer::ExtractJSObjectReferences(
+    int entry, JSObject* js_obj) {
+  HeapObject* obj = js_obj;
+  ExtractClosureReferences(js_obj, entry);
+  ExtractPropertyReferences(js_obj, entry);
+  ExtractElementReferences(js_obj, entry);
+  ExtractInternalReferences(js_obj, entry);
+  SetPropertyReference(
+      obj, entry, heap_->Proto_symbol(), js_obj->GetPrototype());
+  if (obj->IsJSFunction()) {
+    JSFunction* js_fun = JSFunction::cast(js_obj);
+    Object* proto_or_map = js_fun->prototype_or_initial_map();
+    if (!proto_or_map->IsTheHole()) {
+      if (!proto_or_map->IsMap()) {
+        SetPropertyReference(
+            obj, entry,
+            heap_->prototype_symbol(), proto_or_map,
+            NULL,
+            JSFunction::kPrototypeOrInitialMapOffset);
+      } else {
+        SetPropertyReference(
+            obj, entry,
+            heap_->prototype_symbol(), js_fun->prototype());
+      }
+    }
+    SharedFunctionInfo* shared_info = js_fun->shared();
+    // JSFunction has either bindings or literals and never both.
+    bool bound = shared_info->bound();
+    TagObject(js_fun->literals_or_bindings(),
+              bound ? "(function bindings)" : "(function literals)");
+    SetInternalReference(js_fun, entry,
+                         bound ? "bindings" : "literals",
+                         js_fun->literals_or_bindings(),
+                         JSFunction::kLiteralsOffset);
+    TagObject(shared_info, "(shared function info)");
+    SetInternalReference(js_fun, entry,
+                         "shared", shared_info,
+                         JSFunction::kSharedFunctionInfoOffset);
+    TagObject(js_fun->unchecked_context(), "(context)");
+    SetInternalReference(js_fun, entry,
+                         "context", js_fun->unchecked_context(),
+                         JSFunction::kContextOffset);
+    for (int i = JSFunction::kNonWeakFieldsEndOffset;
+         i < JSFunction::kSize;
+         i += kPointerSize) {
+      SetWeakReference(js_fun, entry, i, *HeapObject::RawField(js_fun, i), i);
+    }
+  } else if (obj->IsGlobalObject()) {
+    GlobalObject* global_obj = GlobalObject::cast(obj);
+    SetInternalReference(global_obj, entry,
+                         "builtins", global_obj->builtins(),
+                         GlobalObject::kBuiltinsOffset);
+    SetInternalReference(global_obj, entry,
+                         "global_context", global_obj->global_context(),
+                         GlobalObject::kGlobalContextOffset);
+    SetInternalReference(global_obj, entry,
+                         "global_receiver", global_obj->global_receiver(),
+                         GlobalObject::kGlobalReceiverOffset);
+  }
+  TagObject(js_obj->properties(), "(object properties)");
+  SetInternalReference(obj, entry,
+                       "properties", js_obj->properties(),
+                       JSObject::kPropertiesOffset);
+  TagObject(js_obj->elements(), "(object elements)");
+  SetInternalReference(obj, entry,
+                       "elements", js_obj->elements(),
+                       JSObject::kElementsOffset);
+}
+
+
+void V8HeapExplorer::ExtractStringReferences(int entry, String* string) {
+  if (string->IsConsString()) {
+    ConsString* cs = ConsString::cast(string);
+    SetInternalReference(cs, entry, "first", cs->first());
+    SetInternalReference(cs, entry, "second", cs->second());
+  } else if (string->IsSlicedString()) {
+    SlicedString* ss = SlicedString::cast(string);
+    SetInternalReference(ss, entry, "parent", ss->parent());
+  }
+}
+
+
+void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
+#define EXTRACT_CONTEXT_FIELD(index, type, name) \
+  SetInternalReference(context, entry, #name, context->get(Context::index), \
+      FixedArray::OffsetOfElementAt(Context::index));
+  EXTRACT_CONTEXT_FIELD(CLOSURE_INDEX, JSFunction, closure);
+  EXTRACT_CONTEXT_FIELD(PREVIOUS_INDEX, Context, previous);
+  EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, Object, extension);
+  EXTRACT_CONTEXT_FIELD(GLOBAL_INDEX, GlobalObject, global);
+  if (context->IsGlobalContext()) {
+    TagObject(context->jsfunction_result_caches(),
+              "(context func. result caches)");
+    TagObject(context->normalized_map_cache(), "(context norm. map cache)");
+    TagObject(context->runtime_context(), "(runtime context)");
+    TagObject(context->data(), "(context data)");
+    GLOBAL_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD);
+#undef EXTRACT_CONTEXT_FIELD
+    for (int i = Context::FIRST_WEAK_SLOT;
+         i < Context::GLOBAL_CONTEXT_SLOTS;
+         ++i) {
+      SetWeakReference(context, entry, i, context->get(i),
+          FixedArray::OffsetOfElementAt(i));
+    }
+  }
+}
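EXTRACT_CONTEXT_FIELD together with GLOBAL_CONTEXT_FIELDS is the classic X-macro pattern: a list macro takes a per-entry macro as its argument, so a single field list can drive any number of expansions. A minimal standalone illustration (hypothetical names):

    #include <cstdio>

    // The list macro: invokes V once per field.
    #define FIELD_LIST(V) \
      V(0, closure)       \
      V(1, previous)      \
      V(2, extension)

    void PrintFields() {
    #define PRINT_FIELD(index, name) std::printf("%d: %s\n", index, #name);
      FIELD_LIST(PRINT_FIELD)
    #undef PRINT_FIELD
    }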
+
+
+void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
+  SetInternalReference(map, entry,
+                       "prototype", map->prototype(), Map::kPrototypeOffset);
+  SetInternalReference(map, entry,
+                       "constructor", map->constructor(),
+                       Map::kConstructorOffset);
+  if (!map->instance_descriptors()->IsEmpty()) {
+    TagObject(map->instance_descriptors(), "(map descriptors)");
+    SetInternalReference(map, entry,
+                         "descriptors", map->instance_descriptors(),
+                         Map::kInstanceDescriptorsOrBitField3Offset);
+  }
+  if (map->unchecked_prototype_transitions()->IsFixedArray()) {
+    TagObject(map->prototype_transitions(), "(prototype transitions)");
+    SetInternalReference(map, entry,
+                         "prototype_transitions", map->prototype_transitions(),
+                         Map::kPrototypeTransitionsOrBackPointerOffset);
+  } else {
+    SetInternalReference(map, entry,
+                         "back_pointer", map->GetBackPointer(),
+                         Map::kPrototypeTransitionsOrBackPointerOffset);
+  }
+  SetInternalReference(map, entry,
+                       "code_cache", map->code_cache(),
+                       Map::kCodeCacheOffset);
+}
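The branch above exists because Map::kPrototypeTransitionsOrBackPointerOffset is an overloaded slot: the same word holds either a FixedArray of prototype transitions or a back pointer, and a runtime type check picks the interpretation. A rough standalone sketch of discriminating such a slot (hypothetical types; V8 itself dispatches on the object's map rather than using C++ RTTI):

    struct Value { virtual ~Value() {} };
    struct TransitionsArray : Value {};
    struct BackPointer : Value {};

    const char* Describe(const Value* slot) {
      // One field, two possible payloads: check the dynamic type first.
      if (dynamic_cast<const TransitionsArray*>(slot) != nullptr)
        return "prototype transitions";
      return "back pointer";
    }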
+
+
+void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
+    int entry, SharedFunctionInfo* shared) {
+  HeapObject* obj = shared;
+  SetInternalReference(obj, entry,
+                       "name", shared->name(),
+                       SharedFunctionInfo::kNameOffset);
+  TagObject(shared->code(), "(code)");
+  SetInternalReference(obj, entry,
+                       "code", shared->code(),
+                       SharedFunctionInfo::kCodeOffset);
+  TagObject(shared->scope_info(), "(function scope info)");
+  SetInternalReference(obj, entry,
+                       "scope_info", shared->scope_info(),
+                       SharedFunctionInfo::kScopeInfoOffset);
+  SetInternalReference(obj, entry,
+                       "instance_class_name", shared->instance_class_name(),
+                       SharedFunctionInfo::kInstanceClassNameOffset);
+  SetInternalReference(obj, entry,
+                       "script", shared->script(),
+                       SharedFunctionInfo::kScriptOffset);
+  TagObject(shared->construct_stub(), "(code)");
+  SetInternalReference(obj, entry,
+                       "construct_stub", shared->construct_stub(),
+                       SharedFunctionInfo::kConstructStubOffset);
+  SetInternalReference(obj, entry,
+                       "function_data", shared->function_data(),
+                       SharedFunctionInfo::kFunctionDataOffset);
+  SetInternalReference(obj, entry,
+                       "debug_info", shared->debug_info(),
+                       SharedFunctionInfo::kDebugInfoOffset);
+  SetInternalReference(obj, entry,
+                       "inferred_name", shared->inferred_name(),
+                       SharedFunctionInfo::kInferredNameOffset);
+  SetInternalReference(obj, entry,
+                       "this_property_assignments",
+                       shared->this_property_assignments(),
+                       SharedFunctionInfo::kThisPropertyAssignmentsOffset);
+  SetWeakReference(obj, entry,
+                   1, shared->initial_map(),
+                   SharedFunctionInfo::kInitialMapOffset);
+}
+
+
+void V8HeapExplorer::ExtractScriptReferences(int entry, Script* script) {
+  HeapObject* obj = script;
+  SetInternalReference(obj, entry,
+                       "source", script->source(),
+                       Script::kSourceOffset);
+  SetInternalReference(obj, entry,
+                       "name", script->name(),
+                       Script::kNameOffset);
+  SetInternalReference(obj, entry,
+                       "data", script->data(),
+                       Script::kDataOffset);
+  SetInternalReference(obj, entry,
+                       "context_data", script->context_data(),
+                       Script::kContextOffset);
+  TagObject(script->line_ends(), "(script line ends)");
+  SetInternalReference(obj, entry,
+                       "line_ends", script->line_ends(),
+                       Script::kLineEndsOffset);
+}
+
+
+void V8HeapExplorer::ExtractCodeCacheReferences(
+    int entry, CodeCache* code_cache) {
+  TagObject(code_cache->default_cache(), "(default code cache)");
+  SetInternalReference(code_cache, entry,
+                       "default_cache", code_cache->default_cache(),
+                       CodeCache::kDefaultCacheOffset);
+  TagObject(code_cache->normal_type_cache(), "(code type cache)");
+  SetInternalReference(code_cache, entry,
+                       "type_cache", code_cache->normal_type_cache(),
+                       CodeCache::kNormalTypeCacheOffset);
+}
+
+
+void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
+  TagObject(code->relocation_info(), "(code relocation info)");
+  SetInternalReference(code, entry,
+                       "relocation_info", code->relocation_info(),
+                       Code::kRelocationInfoOffset);
+  SetInternalReference(code, entry,
+                       "handler_table", code->handler_table(),
+                       Code::kHandlerTableOffset);
+  TagObject(code->deoptimization_data(), "(code deopt data)");
+  SetInternalReference(code, entry,
+                       "deoptimization_data", code->deoptimization_data(),
+                       Code::kDeoptimizationDataOffset);
+  SetInternalReference(code, entry,
+                       "type_feedback_info", code->type_feedback_info(),
+                       Code::kTypeFeedbackInfoOffset);
+  SetInternalReference(code, entry,
+                       "gc_metadata", code->gc_metadata(),
+                       Code::kGCMetadataOffset);
+}
+
+
+void V8HeapExplorer::ExtractJSGlobalPropertyCellReferences(
+    int entry, JSGlobalPropertyCell* cell) {
+  SetInternalReference(cell, entry, "value", cell->value());
+}
+
+
+void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) {
   if (!js_obj->IsJSFunction()) return;
 
   JSFunction* func = JSFunction::cast(js_obj);
-  Context* context = func->context();
-  ScopeInfo* scope_info = context->closure()->shared()->scope_info();
-
   if (func->shared()->bound()) {
     FixedArray* bindings = func->function_bindings();
     SetNativeBindReference(js_obj, entry, "bound_this",
@@ -2098,6 +2143,8 @@ void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj,
                              bindings->get(i));
     }
   } else {
+    Context* context = func->context()->declaration_context();
+    ScopeInfo* scope_info = context->closure()->shared()->scope_info();
     // Add context allocated locals.
     int context_locals = scope_info->ContextLocalCount();
     for (int i = 0; i < context_locals; ++i) {
@@ -2109,19 +2156,17 @@ void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj,
     // Add function variable.
     if (scope_info->HasFunctionName()) {
       String* name = scope_info->FunctionName();
-      int idx = Context::MIN_CONTEXT_SLOTS + context_locals;
-#ifdef DEBUG
       VariableMode mode;
-      ASSERT(idx == scope_info->FunctionContextSlotIndex(name, &mode));
-#endif
-      SetClosureReference(js_obj, entry, name, context->get(idx));
+      int idx = scope_info->FunctionContextSlotIndex(name, &mode);
+      if (idx >= 0) {
+        SetClosureReference(js_obj, entry, name, context->get(idx));
+      }
     }
   }
 }
 
 
-void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj,
-                                               HeapEntry* entry) {
+void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
   if (js_obj->HasFastProperties()) {
     DescriptorArray* descs = js_obj->map()->instance_descriptors();
     for (int i = 0; i < descs->number_of_descriptors(); i++) {
@@ -2178,15 +2223,15 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj,
       Object* k = dictionary->KeyAt(i);
       if (dictionary->IsKey(k)) {
         Object* target = dictionary->ValueAt(i);
-        SetPropertyReference(
-            js_obj, entry, String::cast(k), target);
         // We assume that global objects can only have slow properties.
-        if (target->IsJSGlobalPropertyCell()) {
-          SetPropertyShortcutReference(js_obj,
-                                       entry,
-                                       String::cast(k),
-                                       JSGlobalPropertyCell::cast(
-                                           target)->value());
+        Object* value = target->IsJSGlobalPropertyCell()
+            ? JSGlobalPropertyCell::cast(target)->value()
+            : target;
+        if (String::cast(k)->length() > 0) {
+          SetPropertyReference(js_obj, entry, String::cast(k), value);
+        } else {
+          TagObject(value, "(hidden properties)");
+          SetInternalReference(js_obj, entry, "hidden_properties", value);
         }
       }
     }
@@ -2194,8 +2239,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj,
 }
 
 
-void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj,
-                                              HeapEntry* entry) {
+void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
   if (js_obj->HasFastElements()) {
     FixedArray* elements = FixedArray::cast(js_obj->elements());
     int length = js_obj->IsJSArray() ?
@@ -2221,8 +2265,7 @@ void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj,
 }
 
 
-void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj,
-                                               HeapEntry* entry) {
+void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj, int entry) {
   int length = js_obj->GetInternalFieldCount();
   for (int i = 0; i < length; ++i) {
     Object* o = js_obj->GetInternalField(i);
@@ -2348,6 +2391,7 @@ bool V8HeapExplorer::IterateAndExtractReferences(
     filler_ = NULL;
     return false;
   }
+
   SetRootGcRootsReference();
   RootsReferencesExtractor extractor;
   heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
@@ -2355,148 +2399,127 @@ bool V8HeapExplorer::IterateAndExtractReferences(
   heap_->IterateRoots(&extractor, VISIT_ALL);
   extractor.FillReferences(this);
   filler_ = NULL;
-  return progress_->ProgressReport(false);
-}
-
-
-bool V8HeapExplorer::IterateAndSetObjectNames(SnapshotFillerInterface* filler) {
-  HeapIterator iterator(HeapIterator::kFilterUnreachable);
-  filler_ = filler;
-  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
-    SetObjectName(obj);
-  }
-  return true;
+  return progress_->ProgressReport(true);
 }
 
 
-void V8HeapExplorer::SetObjectName(HeapObject* object) {
-  if (!object->IsJSObject() || object->IsJSRegExp() || object->IsJSFunction()) {
-    return;
-  }
-  const char* name = collection_->names()->GetName(
-      GetConstructorName(JSObject::cast(object)));
-  if (object->IsJSGlobalObject()) {
-    const char* tag = objects_tags_.GetTag(object);
-    if (tag != NULL) {
-      name = collection_->names()->GetFormatted("%s / %s", name, tag);
-    }
-  }
-  GetEntry(object)->set_name(name);
+bool V8HeapExplorer::IsEssentialObject(Object* object) {
+  // We have to use raw_unchecked_* versions because checked versions
+  // would fail during iteration over object properties.
+  return object->IsHeapObject()
+      && !object->IsOddball()
+      && object != heap_->raw_unchecked_empty_byte_array()
+      && object != heap_->raw_unchecked_empty_fixed_array()
+      && object != heap_->raw_unchecked_empty_descriptor_array()
+      && object != heap_->raw_unchecked_fixed_array_map()
+      && object != heap_->raw_unchecked_global_property_cell_map()
+      && object != heap_->raw_unchecked_shared_function_info_map()
+      && object != heap_->raw_unchecked_free_space_map()
+      && object != heap_->raw_unchecked_one_pointer_filler_map()
+      && object != heap_->raw_unchecked_two_pointer_filler_map();
 }
 
 
 void V8HeapExplorer::SetClosureReference(HeapObject* parent_obj,
-                                         HeapEntry* parent_entry,
+                                         int parent_entry,
                                          String* reference_name,
                                          Object* child_obj) {
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     filler_->SetNamedReference(HeapGraphEdge::kContextVariable,
-                               parent_obj,
                                parent_entry,
                                collection_->names()->GetName(reference_name),
-                               child_obj,
                                child_entry);
   }
 }
 
 
 void V8HeapExplorer::SetNativeBindReference(HeapObject* parent_obj,
-                                            HeapEntry* parent_entry,
+                                            int parent_entry,
                                             const char* reference_name,
                                             Object* child_obj) {
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     filler_->SetNamedReference(HeapGraphEdge::kShortcut,
-                               parent_obj,
                                parent_entry,
                                reference_name,
-                               child_obj,
                                child_entry);
   }
 }
 
 
 void V8HeapExplorer::SetElementReference(HeapObject* parent_obj,
-                                         HeapEntry* parent_entry,
+                                         int parent_entry,
                                          int index,
                                          Object* child_obj) {
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     filler_->SetIndexedReference(HeapGraphEdge::kElement,
-                                 parent_obj,
                                  parent_entry,
                                  index,
-                                 child_obj,
                                  child_entry);
   }
 }
 
 
 void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
-                                          HeapEntry* parent_entry,
+                                          int parent_entry,
                                           const char* reference_name,
                                           Object* child_obj,
                                           int field_offset) {
   HeapEntry* child_entry = GetEntry(child_obj);
-  if (child_entry != NULL) {
+  if (child_entry == NULL) return;
+  if (IsEssentialObject(child_obj)) {
     filler_->SetNamedReference(HeapGraphEdge::kInternal,
-                               parent_obj,
                                parent_entry,
                                reference_name,
-                               child_obj,
                                child_entry);
-    IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
   }
+  IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
 }
 
 
 void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
-                                          HeapEntry* parent_entry,
+                                          int parent_entry,
                                           int index,
                                           Object* child_obj,
                                           int field_offset) {
   HeapEntry* child_entry = GetEntry(child_obj);
-  if (child_entry != NULL) {
+  if (child_entry == NULL) return;
+  if (IsEssentialObject(child_obj)) {
     filler_->SetNamedReference(HeapGraphEdge::kInternal,
-                               parent_obj,
                                parent_entry,
                                collection_->names()->GetName(index),
-                               child_obj,
                                child_entry);
-    IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
   }
+  IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
 }
 
 
 void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
-                                        HeapEntry* parent_entry,
+                                        int parent_entry,
                                         int index,
                                         Object* child_obj) {
   HeapEntry* child_entry = GetEntry(child_obj);
-  if (child_entry != NULL) {
+  if (child_entry != NULL && IsEssentialObject(child_obj)) {
     filler_->SetIndexedReference(HeapGraphEdge::kHidden,
-                                 parent_obj,
                                  parent_entry,
                                  index,
-                                 child_obj,
                                  child_entry);
   }
 }
 
 
 void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
-                                      HeapEntry* parent_entry,
+                                      int parent_entry,
                                       int index,
                                       Object* child_obj,
                                       int field_offset) {
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     filler_->SetIndexedReference(HeapGraphEdge::kWeak,
-                                 parent_obj,
                                  parent_entry,
                                  index,
-                                 child_obj,
                                  child_entry);
     IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
   }
@@ -2504,7 +2527,7 @@ void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
 
 
 void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
-                                          HeapEntry* parent_entry,
+                                          int parent_entry,
                                           String* reference_name,
                                           Object* child_obj,
                                           const char* name_format_string,
@@ -2521,10 +2544,8 @@ void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
         collection_->names()->GetName(reference_name);
 
     filler_->SetNamedReference(type,
-                               parent_obj,
                                parent_entry,
                                name,
-                               child_obj,
                                child_entry);
     IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
   }
@@ -2532,16 +2553,14 @@ void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
 
 
 void V8HeapExplorer::SetPropertyShortcutReference(HeapObject* parent_obj,
-                                                  HeapEntry* parent_entry,
+                                                  int parent_entry,
                                                   String* reference_name,
                                                   Object* child_obj) {
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     filler_->SetNamedReference(HeapGraphEdge::kShortcut,
-                               parent_obj,
                                parent_entry,
                                collection_->names()->GetName(reference_name),
-                               child_obj,
                                child_entry);
   }
 }
@@ -2550,26 +2569,26 @@ void V8HeapExplorer::SetPropertyShortcutReference(HeapObject* parent_obj,
 void V8HeapExplorer::SetRootGcRootsReference() {
   filler_->SetIndexedAutoIndexReference(
       HeapGraphEdge::kElement,
-      kInternalRootObject, snapshot_->root(),
-      kGcRootsObject, snapshot_->gc_roots());
+      snapshot_->root()->index(),
+      snapshot_->gc_roots());
 }
 
 
-void V8HeapExplorer::SetRootShortcutReference(Object* child_obj) {
+void V8HeapExplorer::SetUserGlobalReference(Object* child_obj) {
   HeapEntry* child_entry = GetEntry(child_obj);
   ASSERT(child_entry != NULL);
   filler_->SetNamedAutoIndexReference(
       HeapGraphEdge::kShortcut,
-      kInternalRootObject, snapshot_->root(),
-      child_obj, child_entry);
+      snapshot_->root()->index(),
+      child_entry);
 }
 
 
 void V8HeapExplorer::SetGcRootsReference(VisitorSynchronization::SyncTag tag) {
   filler_->SetIndexedAutoIndexReference(
       HeapGraphEdge::kElement,
-      kGcRootsObject, snapshot_->gc_roots(),
-      GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag));
+      snapshot_->gc_roots()->index(),
+      snapshot_->gc_subroot(tag));
 }
 
 
@@ -2577,21 +2596,48 @@ void V8HeapExplorer::SetGcSubrootReference(
     VisitorSynchronization::SyncTag tag, bool is_weak, Object* child_obj) {
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
-    filler_->SetIndexedAutoIndexReference(
-        is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kElement,
-        GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag),
-        child_obj, child_entry);
+    const char* name = GetStrongGcSubrootName(child_obj);
+    if (name != NULL) {
+      filler_->SetNamedReference(
+          HeapGraphEdge::kInternal,
+          snapshot_->gc_subroot(tag)->index(),
+          name,
+          child_entry);
+    } else {
+      filler_->SetIndexedAutoIndexReference(
+          is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kElement,
+          snapshot_->gc_subroot(tag)->index(),
+          child_entry);
+    }
   }
 }
 
 
+const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
+  if (strong_gc_subroot_names_.is_empty()) {
+#define NAME_ENTRY(name) strong_gc_subroot_names_.SetTag(heap_->name(), #name);
+#define ROOT_NAME(type, name, camel_name) NAME_ENTRY(name)
+    STRONG_ROOT_LIST(ROOT_NAME)
+#undef ROOT_NAME
+#define STRUCT_MAP_NAME(NAME, Name, name) NAME_ENTRY(name##_map)
+    STRUCT_LIST(STRUCT_MAP_NAME)
+#undef STRUCT_MAP_NAME
+#define SYMBOL_NAME(name, str) NAME_ENTRY(name)
+    SYMBOL_LIST(SYMBOL_NAME)
+#undef SYMBOL_NAME
+#undef NAME_ENTRY
+    CHECK(!strong_gc_subroot_names_.is_empty());
+  }
+  return strong_gc_subroot_names_.GetTag(object);
+}
+
+
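// Illustrative standalone sketch (names invented) of the X-macro pattern
// used above: one list macro is expanded with different per-entry macros,
// here to populate a lazily built name table, mirroring how
// STRONG_ROOT_LIST/STRUCT_LIST/SYMBOL_LIST feed strong_gc_subroot_names_.
#include <cstdio>
#include <map>
#include <string>

#define DEMO_ROOT_LIST(V) \
  V(empty_string)         \
  V(undefined_value)      \
  V(nan_value)

static std::map<std::string, const char*> demo_names;

static const char* GetDemoName(const std::string& key) {
  if (demo_names.empty()) {  // Lazy one-time initialization, as above.
#define NAME_ENTRY(name) demo_names[#name] = #name;
    DEMO_ROOT_LIST(NAME_ENTRY)
#undef NAME_ENTRY
  }
  std::map<std::string, const char*>::const_iterator it = demo_names.find(key);
  return it != demo_names.end() ? it->second : NULL;
}

int main() {
  std::printf("%s\n", GetDemoName("undefined_value"));  // -> undefined_value
  return 0;
}
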
 void V8HeapExplorer::TagObject(Object* obj, const char* tag) {
-  if (obj->IsHeapObject() &&
-      !obj->IsOddball() &&
-      obj != heap_->raw_unchecked_empty_byte_array() &&
-      obj != heap_->raw_unchecked_empty_fixed_array() &&
-      obj != heap_->raw_unchecked_empty_descriptor_array()) {
-    objects_tags_.SetTag(obj, tag);
+  if (IsEssentialObject(obj)) {
+    HeapEntry* entry = GetEntry(obj);
+    if (entry->name()[0] == '\0') {
+      entry->set_name(tag);
+    }
   }
 }
 
@@ -2637,7 +2683,7 @@ void V8HeapExplorer::TagGlobalObjects() {
     Handle<JSGlobalObject> global_obj = enumerator.at(i);
     Object* obj_document;
     if (global_obj->GetProperty(*document_string)->ToObject(&obj_document) &&
-       obj_document->IsJSObject()) {
+        obj_document->IsJSObject()) {
       JSObject* document = JSObject::cast(obj_document);
       Object* obj_url;
       if (document->GetProperty(*url_string)->ToObject(&obj_url) &&
@@ -2681,8 +2727,7 @@ class BasicHeapEntriesAllocator : public HeapEntriesAllocator {
       collection_(snapshot_->collection()),
       entries_type_(entries_type) {
   }
-  virtual HeapEntry* AllocateEntry(
-      HeapThing ptr, int children_count, int retainers_count);
+  virtual HeapEntry* AllocateEntry(HeapThing ptr);
  private:
   HeapSnapshot* snapshot_;
   HeapSnapshotsCollection* collection_;
@@ -2690,23 +2735,19 @@ class BasicHeapEntriesAllocator : public HeapEntriesAllocator {
 };
 
 
-HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(
-    HeapThing ptr, int children_count, int retainers_count) {
+HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
   v8::RetainedObjectInfo* info = reinterpret_cast<v8::RetainedObjectInfo*>(ptr);
   intptr_t elements = info->GetElementCount();
   intptr_t size = info->GetSizeInBytes();
+  const char* name = elements != -1
+      ? collection_->names()->GetFormatted(
+            "%s / %" V8_PTR_PREFIX "d entries", info->GetLabel(), elements)
+      : collection_->names()->GetCopy(info->GetLabel());
   return snapshot_->AddEntry(
       entries_type_,
-      elements != -1 ?
-          collection_->names()->GetFormatted(
-              "%s / %" V8_PTR_PREFIX "d entries",
-              info->GetLabel(),
-              info->GetElementCount()) :
-          collection_->names()->GetCopy(info->GetLabel()),
+      name,
       HeapObjectsMap::GenerateId(info),
-      size != -1 ? static_cast<int>(size) : 0,
-      children_count,
-      retainers_count);
+      size != -1 ? static_cast<int>(size) : 0);
 }
 
 
@@ -2787,9 +2828,9 @@ void NativeObjectsExplorer::FillImplicitReferences() {
   for (int i = 0; i < groups->length(); ++i) {
     ImplicitRefGroup* group = groups->at(i);
     HeapObject* parent = *group->parent_;
-    HeapEntry* parent_entry =
-        filler_->FindOrAddEntry(parent, native_entries_allocator_);
-    ASSERT(parent_entry != NULL);
+    int parent_entry =
+        filler_->FindOrAddEntry(parent, native_entries_allocator_)->index();
+    ASSERT(parent_entry != HeapEntry::kNoEntry);
     Object*** children = group->children_;
     for (size_t j = 0; j < group->length_; ++j) {
       Object* child = *children[j];
@@ -2797,9 +2838,9 @@ void NativeObjectsExplorer::FillImplicitReferences() {
           filler_->FindOrAddEntry(child, native_entries_allocator_);
       filler_->SetNamedReference(
           HeapGraphEdge::kInternal,
-          parent, parent_entry,
+          parent_entry,
           "native",
-          child, child_entry);
+          child_entry);
     }
   }
 }
@@ -2877,8 +2918,9 @@ NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
                                        HEAP->HashSeed());
   HashMap::Entry* entry = native_groups_.Lookup(const_cast<char*>(label_copy),
                                                 hash, true);
-  if (entry->value == NULL)
+  if (entry->value == NULL) {
     entry->value = new NativeGroupRetainedObjectInfo(label);
+  }
   return static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
 }
 
@@ -2894,8 +2936,8 @@ void NativeObjectsExplorer::SetNativeRootReference(
       filler_->FindOrAddEntry(group_info, synthetic_entries_allocator_);
   filler_->SetNamedAutoIndexReference(
       HeapGraphEdge::kInternal,
-      group_info, group_entry,
-      info, child_entry);
+      group_entry->index(),
+      child_entry);
 }
 
 
@@ -2907,12 +2949,12 @@ void NativeObjectsExplorer::SetWrapperNativeReferences(
       filler_->FindOrAddEntry(info, native_entries_allocator_);
   ASSERT(info_entry != NULL);
   filler_->SetNamedReference(HeapGraphEdge::kInternal,
-                             wrapper, wrapper_entry,
+                             wrapper_entry->index(),
                              "native",
-                             info, info_entry);
+                             info_entry);
   filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
-                                        info, info_entry,
-                                        wrapper, wrapper_entry);
+                                        info_entry->index(),
+                                        wrapper_entry);
 }
 
 
@@ -2927,8 +2969,8 @@ void NativeObjectsExplorer::SetRootNativeRootsReference() {
     ASSERT(group_entry != NULL);
     filler_->SetIndexedAutoIndexReference(
         HeapGraphEdge::kElement,
-        V8HeapExplorer::kInternalRootObject, snapshot_->root(),
-        group_info, group_entry);
+        snapshot_->root()->index(),
+        group_entry);
   }
 }
 
@@ -2943,56 +2985,6 @@ void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) {
 }
 
 
-class SnapshotCounter : public SnapshotFillerInterface {
- public:
-  explicit SnapshotCounter(HeapEntriesMap* entries) : entries_(entries) { }
-  HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
-    entries_->Pair(ptr, allocator, HeapEntriesMap::kHeapEntryPlaceholder);
-    return HeapEntriesMap::kHeapEntryPlaceholder;
-  }
-  HeapEntry* FindEntry(HeapThing ptr) {
-    return entries_->Map(ptr);
-  }
-  HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
-    HeapEntry* entry = FindEntry(ptr);
-    return entry != NULL ? entry : AddEntry(ptr, allocator);
-  }
-  void SetIndexedReference(HeapGraphEdge::Type,
-                           HeapThing parent_ptr,
-                           HeapEntry*,
-                           int,
-                           HeapThing child_ptr,
-                           HeapEntry*) {
-    entries_->CountReference(parent_ptr, child_ptr);
-  }
-  void SetIndexedAutoIndexReference(HeapGraphEdge::Type,
-                                    HeapThing parent_ptr,
-                                    HeapEntry*,
-                                    HeapThing child_ptr,
-                                    HeapEntry*) {
-    entries_->CountReference(parent_ptr, child_ptr);
-  }
-  void SetNamedReference(HeapGraphEdge::Type,
-                         HeapThing parent_ptr,
-                         HeapEntry*,
-                         const char*,
-                         HeapThing child_ptr,
-                         HeapEntry*) {
-    entries_->CountReference(parent_ptr, child_ptr);
-  }
-  void SetNamedAutoIndexReference(HeapGraphEdge::Type,
-                                  HeapThing parent_ptr,
-                                  HeapEntry*,
-                                  HeapThing child_ptr,
-                                  HeapEntry*) {
-    entries_->CountReference(parent_ptr, child_ptr);
-  }
-
- private:
-  HeapEntriesMap* entries_;
-};
-
-
 class SnapshotFiller : public SnapshotFillerInterface {
  public:
   explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
@@ -3000,64 +2992,48 @@ class SnapshotFiller : public SnapshotFillerInterface {
         collection_(snapshot->collection()),
         entries_(entries) { }
   HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
-    UNREACHABLE();
-    return NULL;
+    HeapEntry* entry = allocator->AllocateEntry(ptr);
+    entries_->Pair(ptr, entry->index());
+    return entry;
   }
   HeapEntry* FindEntry(HeapThing ptr) {
-    return entries_->Map(ptr);
+    int index = entries_->Map(ptr);
+    return index != HeapEntry::kNoEntry ? &snapshot_->entries()[index] : NULL;
   }
   HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
     HeapEntry* entry = FindEntry(ptr);
     return entry != NULL ? entry : AddEntry(ptr, allocator);
   }
   void SetIndexedReference(HeapGraphEdge::Type type,
-                           HeapThing parent_ptr,
-                           HeapEntry* parent_entry,
+                           int parent,
                            int index,
-                           HeapThing child_ptr,
                            HeapEntry* child_entry) {
-    int child_index, retainer_index;
-    entries_->CountReference(
-        parent_ptr, child_ptr, &child_index, &retainer_index);
-    parent_entry->SetIndexedReference(
-        type, child_index, index, child_entry, retainer_index);
+    HeapEntry* parent_entry = &snapshot_->entries()[parent];
+    parent_entry->SetIndexedReference(type, index, child_entry);
   }
   void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
-                                    HeapThing parent_ptr,
-                                    HeapEntry* parent_entry,
-                                    HeapThing child_ptr,
+                                    int parent,
                                     HeapEntry* child_entry) {
-    int child_index, retainer_index;
-    entries_->CountReference(
-        parent_ptr, child_ptr, &child_index, &retainer_index);
-    parent_entry->SetIndexedReference(
-        type, child_index, child_index + 1, child_entry, retainer_index);
+    HeapEntry* parent_entry = &snapshot_->entries()[parent];
+    int index = parent_entry->children_count() + 1;
+    parent_entry->SetIndexedReference(type, index, child_entry);
   }
   void SetNamedReference(HeapGraphEdge::Type type,
-                         HeapThing parent_ptr,
-                         HeapEntry* parent_entry,
+                         int parent,
                          const char* reference_name,
-                         HeapThing child_ptr,
                          HeapEntry* child_entry) {
-    int child_index, retainer_index;
-    entries_->CountReference(
-        parent_ptr, child_ptr, &child_index, &retainer_index);
-    parent_entry->SetNamedReference(
-        type, child_index, reference_name, child_entry, retainer_index);
+    HeapEntry* parent_entry = &snapshot_->entries()[parent];
+    parent_entry->SetNamedReference(type, reference_name, child_entry);
   }
   void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
-                                  HeapThing parent_ptr,
-                                  HeapEntry* parent_entry,
-                                  HeapThing child_ptr,
+                                  int parent,
                                   HeapEntry* child_entry) {
-    int child_index, retainer_index;
-    entries_->CountReference(
-        parent_ptr, child_ptr, &child_index, &retainer_index);
-    parent_entry->SetNamedReference(type,
-                              child_index,
-                              collection_->names()->GetName(child_index + 1),
-                              child_entry,
-                              retainer_index);
+    HeapEntry* parent_entry = &snapshot_->entries()[parent];
+    int index = parent_entry->children_count() + 1;
+    parent_entry->SetNamedReference(
+        type,
+        collection_->names()->GetName(index),
+        child_entry);
   }
 
  private:
@@ -3107,30 +3083,15 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
   debug_heap->Verify();
 #endif
 
-  SetProgressTotal(2);  // 2 passes.
-
-#ifdef DEBUG
-  debug_heap->Verify();
-#endif
-
-  // Pass 1. Iterate heap contents to count entries and references.
-  if (!CountEntriesAndReferences()) return false;
+  SetProgressTotal(1);  // 1 pass.
 
 #ifdef DEBUG
   debug_heap->Verify();
 #endif
 
-  // Allocate memory for entries and references.
-  snapshot_->AllocateEntries(entries_.entries_count(),
-                             entries_.total_children_count(),
-                             entries_.total_retainers_count());
-
-  // Allocate heap objects to entries hash map.
-  entries_.AllocateEntries(V8HeapExplorer::kInternalRootObject);
-
-  // Pass 2. Fill references.
   if (!FillReferences()) return false;
 
+  snapshot_->FillChildrenAndRetainers();
   snapshot_->RememberLastJSObjectId();
 
   if (!SetEntriesDominators()) return false;
@@ -3162,49 +3123,77 @@ bool HeapSnapshotGenerator::ProgressReport(bool force) {
 void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
   if (control_ == NULL) return;
   HeapIterator iterator(HeapIterator::kFilterUnreachable);
-  progress_total_ = (
+  progress_total_ = iterations_count * (
       v8_heap_explorer_.EstimateObjectsCount(&iterator) +
-      dom_explorer_.EstimateObjectsCount()) * iterations_count;
+      dom_explorer_.EstimateObjectsCount());
   progress_counter_ = 0;
 }
 
 
-bool HeapSnapshotGenerator::CountEntriesAndReferences() {
-  SnapshotCounter counter(&entries_);
-  v8_heap_explorer_.AddRootEntries(&counter);
-  return v8_heap_explorer_.IterateAndExtractReferences(&counter)
-      && dom_explorer_.IterateAndExtractReferences(&counter);
-}
-
-
 bool HeapSnapshotGenerator::FillReferences() {
   SnapshotFiller filler(snapshot_, &entries_);
-  // IterateAndExtractReferences cannot set object names because
-  // it makes call to JSObject::LocalLookupRealNamedProperty which
-  // in turn may relocate objects in property maps thus changing the heap
-  // layout and affecting retainer counts. This is not acceptable because
-  // number of retainers must not change between count and fill passes.
-  // To avoid this there's a separate postpass that set object names.
+  v8_heap_explorer_.AddRootEntries(&filler);
   return v8_heap_explorer_.IterateAndExtractReferences(&filler)
-      && dom_explorer_.IterateAndExtractReferences(&filler)
-      && v8_heap_explorer_.IterateAndSetObjectNames(&filler);
+      && dom_explorer_.IterateAndExtractReferences(&filler);
 }
 
 
-void HeapSnapshotGenerator::FillReversePostorderIndexes(
+bool HeapSnapshotGenerator::IsUserGlobalReference(const HeapGraphEdge* edge) {
+  ASSERT(edge->from() == snapshot_->root());
+  return edge->type() == HeapGraphEdge::kShortcut;
+}
+
+
+void HeapSnapshotGenerator::MarkUserReachableObjects() {
+  List<HeapEntry*> worklist;
+
+  Vector<HeapGraphEdge*> children = snapshot_->root()->children();
+  for (int i = 0; i < children.length(); ++i) {
+    if (IsUserGlobalReference(children[i])) {
+      worklist.Add(children[i]->to());
+    }
+  }
+
+  while (!worklist.is_empty()) {
+    HeapEntry* entry = worklist.RemoveLast();
+    if (entry->user_reachable()) continue;
+    entry->set_user_reachable();
+    Vector<HeapGraphEdge*> children = entry->children();
+    for (int i = 0; i < children.length(); ++i) {
+      HeapEntry* child = children[i]->to();
+      if (!child->user_reachable()) {
+        worklist.Add(child);
+      }
+    }
+  }
+}
+
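// Illustrative standalone sketch (std::vector in place of V8's List and
// Vector, edge types elided) of the marking pass above: seed the worklist
// with the targets of the root's shortcut edges, then flood the
// user_reachable bit along outgoing edges.
#include <vector>

struct DemoNode {
  DemoNode() : user_reachable(false) {}
  bool user_reachable;
  std::vector<DemoNode*> children;
};

static void DemoMarkUserReachable(const std::vector<DemoNode*>& user_roots) {
  std::vector<DemoNode*> worklist(user_roots.begin(), user_roots.end());
  while (!worklist.empty()) {
    DemoNode* entry = worklist.back();
    worklist.pop_back();
    if (entry->user_reachable) continue;  // Already marked via another path.
    entry->user_reachable = true;
    for (size_t i = 0; i < entry->children.size(); ++i) {
      if (!entry->children[i]->user_reachable) {
        worklist.push_back(entry->children[i]);
      }
    }
  }
}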
+
+static bool IsRetainingEdge(HeapGraphEdge* edge) {
+  if (edge->type() == HeapGraphEdge::kShortcut) return false;
+  // The edge is not retaining if it goes from system domain
+  // (i.e. an object not reachable from window) to the user domain
+  // (i.e. a reachable object).
+  return edge->from()->user_reachable()
+      || !edge->to()->user_reachable();
+}
+
+
+void HeapSnapshotGenerator::FillPostorderIndexes(
     Vector<HeapEntry*>* entries) {
   snapshot_->ClearPaint();
   int current_entry = 0;
   List<HeapEntry*> nodes_to_visit;
-  nodes_to_visit.Add(snapshot_->root());
+  HeapEntry* root = snapshot_->root();
+  nodes_to_visit.Add(root);
   snapshot_->root()->paint();
   while (!nodes_to_visit.is_empty()) {
     HeapEntry* entry = nodes_to_visit.last();
-    Vector<HeapGraphEdge> children = entry->children();
+    Vector<HeapGraphEdge*> children = entry->children();
     bool has_new_edges = false;
     for (int i = 0; i < children.length(); ++i) {
-      if (children[i].type() == HeapGraphEdge::kShortcut) continue;
-      HeapEntry* child = children[i].to();
+      if (entry != root && !IsRetainingEdge(children[i])) continue;
+      HeapEntry* child = children[i]->to();
       if (!child->painted()) {
         nodes_to_visit.Add(child);
         child->paint();
@@ -3212,7 +3201,7 @@ void HeapSnapshotGenerator::FillReversePostorderIndexes(
       }
     }
     if (!has_new_edges) {
-      entry->set_ordered_index(current_entry);
+      entry->set_postorder_index(current_entry);
       (*entries)[current_entry++] = entry;
       nodes_to_visit.RemoveLast();
     }
@@ -3238,9 +3227,9 @@ bool HeapSnapshotGenerator::BuildDominatorTree(
     const Vector<HeapEntry*>& entries,
     Vector<int>* dominators) {
   if (entries.length() == 0) return true;
+  HeapEntry* root = snapshot_->root();
   const int entries_length = entries.length(), root_index = entries_length - 1;
-  static const int kNoDominator = -1;
-  for (int i = 0; i < root_index; ++i) (*dominators)[i] = kNoDominator;
+  for (int i = 0; i < root_index; ++i) (*dominators)[i] = HeapEntry::kNoEntry;
   (*dominators)[root_index] = root_index;
 
   // The affected array is used to mark entries whose dominators
@@ -3248,28 +3237,28 @@ bool HeapSnapshotGenerator::BuildDominatorTree(
   ScopedVector<bool> affected(entries_length);
   for (int i = 0; i < affected.length(); ++i) affected[i] = false;
   // Mark the root direct children as affected.
-  Vector<HeapGraphEdge> children = entries[root_index]->children();
+  Vector<HeapGraphEdge*> children = entries[root_index]->children();
   for (int i = 0; i < children.length(); ++i) {
-    affected[children[i].to()->ordered_index()] = true;
+    affected[children[i]->to()->postorder_index()] = true;
   }
 
   bool changed = true;
   while (changed) {
     changed = false;
-    if (!ProgressReport(true)) return false;
+    if (!ProgressReport(false)) return false;
     for (int i = root_index - 1; i >= 0; --i) {
       if (!affected[i]) continue;
       affected[i] = false;
       // If dominator of the entry has already been set to root,
       // then it can't propagate any further.
       if ((*dominators)[i] == root_index) continue;
-      int new_idom_index = kNoDominator;
+      int new_idom_index = HeapEntry::kNoEntry;
       Vector<HeapGraphEdge*> rets = entries[i]->retainers();
       for (int j = 0; j < rets.length(); ++j) {
-        if (rets[j]->type() == HeapGraphEdge::kShortcut) continue;
-        int ret_index = rets[j]->From()->ordered_index();
-        if (dominators->at(ret_index) != kNoDominator) {
-          new_idom_index = new_idom_index == kNoDominator
+        if (rets[j]->from() != root && !IsRetainingEdge(rets[j])) continue;
+        int ret_index = rets[j]->from()->postorder_index();
+        if (dominators->at(ret_index) != HeapEntry::kNoEntry) {
+          new_idom_index = new_idom_index == HeapEntry::kNoEntry
               ? ret_index
               : Intersect(ret_index, new_idom_index, *dominators);
           // If idom has already reached the root, it doesn't make sense
@@ -3277,13 +3266,13 @@ bool HeapSnapshotGenerator::BuildDominatorTree(
           if (new_idom_index == root_index) break;
         }
       }
-      if (new_idom_index != kNoDominator
+      if (new_idom_index != HeapEntry::kNoEntry
           && dominators->at(i) != new_idom_index) {
         (*dominators)[i] = new_idom_index;
         changed = true;
-        Vector<HeapGraphEdge> children = entries[i]->children();
+        Vector<HeapGraphEdge*> children = entries[i]->children();
         for (int j = 0; j < children.length(); ++j) {
-          affected[children[j].to()->ordered_index()] = true;
+          affected[children[j]->to()->postorder_index()] = true;
         }
       }
     }
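
// Intersect() is declared earlier in this file and not shown in the hunk
// above. The loop it serves is the standard iterative dominator algorithm
// (Cooper/Harvey/Kennedy); a standalone sketch of the intersection step,
// assuming dominators[] maps a postorder index to the postorder index of
// the current idom candidate, with the root at the highest index:
#include <vector>

static int DemoIntersect(int i1, int i2, const std::vector<int>& dominators) {
  int finger1 = i1;
  int finger2 = i2;
  while (finger1 != finger2) {
    // Postorder indexing puts ancestors at larger indexes, so the smaller
    // finger is always the one to walk up the tentative dominator tree.
    while (finger1 < finger2) finger1 = dominators[finger1];
    while (finger2 < finger1) finger2 = dominators[finger2];
  }
  return finger1;
}
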
@@ -3293,13 +3282,14 @@ bool HeapSnapshotGenerator::BuildDominatorTree(
 
 
 bool HeapSnapshotGenerator::SetEntriesDominators() {
-  // This array is used for maintaining reverse postorder of nodes.
-  ScopedVector<HeapEntry*> ordered_entries(snapshot_->entries()->length());
-  FillReversePostorderIndexes(&ordered_entries);
+  MarkUserReachableObjects();
+  // This array is used for maintaining postorder of nodes.
+  ScopedVector<HeapEntry*> ordered_entries(snapshot_->entries().length());
+  FillPostorderIndexes(&ordered_entries);
   ScopedVector<int> dominators(ordered_entries.length());
   if (!BuildDominatorTree(ordered_entries, &dominators)) return false;
   for (int i = 0; i < ordered_entries.length(); ++i) {
-    ASSERT(dominators[i] >= 0);
+    ASSERT(dominators[i] != HeapEntry::kNoEntry);
     ordered_entries[i]->set_dominator(ordered_entries[dominators[i]]);
   }
   return true;
@@ -3310,17 +3300,18 @@ bool HeapSnapshotGenerator::CalculateRetainedSizes() {
   // In the dominator tree we only know parent nodes, not children,
   // so to sum up total sizes we "bubble" each node's self size,
   // adding it to all of its parents.
-  List<HeapEntry*>& entries = *snapshot_->entries();
+  List<HeapEntry>& entries = snapshot_->entries();
   for (int i = 0; i < entries.length(); ++i) {
-    HeapEntry* entry = entries[i];
+    HeapEntry* entry = &entries[i];
     entry->set_retained_size(entry->self_size());
   }
   for (int i = 0; i < entries.length(); ++i) {
-    HeapEntry* entry = entries[i];
-    int entry_size = entry->self_size();
-    for (HeapEntry* dominator = entry->dominator();
-         dominator != entry;
-         entry = dominator, dominator = entry->dominator()) {
+    int entry_size = entries[i].self_size();
+    HeapEntry* current = &entries[i];
+    for (HeapEntry* dominator = current->dominator();
+         dominator != current;
+         current = dominator, dominator = current->dominator()) {
+      ASSERT(current->dominator() != NULL);
       dominator->add_retained_size(entry_size);
     }
   }
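
// Standalone sketch of the "bubbling" above, with plain structs instead of
// HeapEntry: each entry starts at its self size, then that self size is
// added to every dominator-tree ancestor; the walk terminates at the root,
// which dominates itself.
#include <vector>

struct DemoEntry {
  int self_size;
  int retained_size;
  DemoEntry* dominator;  // The root's dominator is the root itself.
};

static void DemoCalculateRetainedSizes(std::vector<DemoEntry*>* entries) {
  for (size_t i = 0; i < entries->size(); ++i) {
    DemoEntry* entry = (*entries)[i];
    entry->retained_size = entry->self_size;
  }
  for (size_t i = 0; i < entries->size(); ++i) {
    int entry_size = (*entries)[i]->self_size;
    DemoEntry* current = (*entries)[i];
    for (DemoEntry* dominator = current->dominator;
         dominator != current;
         current = dominator, dominator = current->dominator) {
      dominator->retained_size += entry_size;
    }
  }
}
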
@@ -3424,19 +3415,23 @@ class OutputStreamWriter {
 };
 
 
+// type, name|index, to_node.
+const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3;
+// type, name, id, self_size, retained_size, dominator, edges_index.
+const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 7;
+
 void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
   ASSERT(writer_ == NULL);
   writer_ = new OutputStreamWriter(stream);
 
   HeapSnapshot* original_snapshot = NULL;
-  if (snapshot_->raw_entries_size() >=
+  if (snapshot_->RawSnapshotSize() >=
       SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize) {
     // The snapshot is too big. Serialize a fake snapshot.
     original_snapshot = snapshot_;
     snapshot_ = CreateFakeSnapshot();
   }
-  // Since nodes graph is cyclic, we need the first pass to enumerate
-  // them. Strings can be serialized in one pass.
+
   SerializeImpl();
 
   delete writer_;
@@ -3454,30 +3449,35 @@ HeapSnapshot* HeapSnapshotJSONSerializer::CreateFakeSnapshot() {
                                           HeapSnapshot::kFull,
                                           snapshot_->title(),
                                           snapshot_->uid());
-  result->AllocateEntries(2, 1, 0);
-  HeapEntry* root = result->AddRootEntry(1);
+  result->AddRootEntry();
   const char* text = snapshot_->collection()->names()->GetFormatted(
       "The snapshot is too big. "
       "Maximum snapshot size is %"  V8_PTR_PREFIX "u MB. "
       "Actual snapshot size is %"  V8_PTR_PREFIX "u MB.",
       SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize / MB,
-      (snapshot_->raw_entries_size() + MB - 1) / MB);
-  HeapEntry* message = result->AddEntry(
-      HeapEntry::kString, text, 0, 4, 0, 0);
-  root->SetUnidirElementReference(0, 1, message);
+      (snapshot_->RawSnapshotSize() + MB - 1) / MB);
+  HeapEntry* message = result->AddEntry(HeapEntry::kString, text, 0, 4);
+  result->root()->SetIndexedReference(HeapGraphEdge::kElement, 1, message);
+  result->FillChildrenAndRetainers();
   result->SetDominatorsToSelf();
   return result;
 }
 
 
 void HeapSnapshotJSONSerializer::SerializeImpl() {
+  List<HeapEntry>& nodes = snapshot_->entries();
+  ASSERT(0 == snapshot_->root()->index());
   writer_->AddCharacter('{');
   writer_->AddString("\"snapshot\":{");
   SerializeSnapshot();
   if (writer_->aborted()) return;
   writer_->AddString("},\n");
   writer_->AddString("\"nodes\":[");
-  SerializeNodes();
+  SerializeNodes(nodes);
+  if (writer_->aborted()) return;
+  writer_->AddString("],\n");
+  writer_->AddString("\"edges\":[");
+  SerializeEdges(nodes);
   if (writer_->aborted()) return;
   writer_->AddString("],\n");
   writer_->AddString("\"strings\":[");
@@ -3499,16 +3499,9 @@ int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
 }
 
 
-// This function won't work correctly for MIN_INT but this is not
-// a problem in case of heap snapshots serialization.
-static int itoa(int value, const Vector<char>& buffer, int buffer_pos) {
-  if (value < 0) {
-    buffer[buffer_pos++] = '-';
-    value = -value;
-  }
-
+static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
   int number_of_digits = 0;
-  int t = value;
+  unsigned t = value;
   do {
     ++number_of_digits;
   } while (t /= 10);
@@ -3524,7 +3517,8 @@ static int itoa(int value, const Vector<char>& buffer, int buffer_pos) {
 }
 
 
-void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge) {
+void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
+                                               bool first_edge) {
   // The buffer needs space for 3 ints, 3 commas and \0
   static const int kBufferSize =
       MaxDecimalDigitsIn<sizeof(int)>::kSigned * 3 + 3 + 1;  // NOLINT
@@ -3534,127 +3528,70 @@ void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge) {
       || edge->type() == HeapGraphEdge::kWeak
       ? edge->index() : GetStringId(edge->name());
   int buffer_pos = 0;
+  if (!first_edge) {
+    buffer[buffer_pos++] = ',';
+  }
+  buffer_pos = utoa(edge->type(), buffer, buffer_pos);
   buffer[buffer_pos++] = ',';
-  buffer_pos = itoa(edge->type(), buffer, buffer_pos);
-  buffer[buffer_pos++] = ',';
-  buffer_pos = itoa(edge_name_or_index, buffer, buffer_pos);
+  buffer_pos = utoa(edge_name_or_index, buffer, buffer_pos);
   buffer[buffer_pos++] = ',';
-  buffer_pos = itoa(edge->to()->entry_index(), buffer, buffer_pos);
+  buffer_pos = utoa(entry_index(edge->to()), buffer, buffer_pos);
   buffer[buffer_pos++] = '\0';
   writer_->AddString(buffer.start());
 }
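
// entry_index() is declared elsewhere in the serializer and not shown in
// this hunk. Given kNodeFieldsCount above, the natural reading (an
// assumption here) is that a node is addressed by the offset of its first
// field in the flat "nodes" array, which is what to_node stores:
static const int kDemoNodeFieldsCount = 7;  // Matches kNodeFieldsCount.
static int DemoEntryIndex(int node_index) {
  return node_index * kDemoNodeFieldsCount;
}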
 
 
-void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
+void HeapSnapshotJSONSerializer::SerializeEdges(const List<HeapEntry>& nodes) {
+  bool first_edge = true;
+  for (int i = 0; i < nodes.length(); ++i) {
+    HeapEntry* entry = &nodes[i];
+    Vector<HeapGraphEdge*> children = entry->children();
+    for (int j = 0; j < children.length(); ++j) {
+      SerializeEdge(children[j], first_edge);
+      first_edge = false;
+      if (writer_->aborted()) return;
+    }
+  }
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry,
+                                               int edges_index) {
   // The buffer needs space for 6 ints, 1 uint32_t, 7 commas, \n and \0
   static const int kBufferSize =
       6 * MaxDecimalDigitsIn<sizeof(int)>::kSigned  // NOLINT
       + MaxDecimalDigitsIn<sizeof(uint32_t)>::kUnsigned  // NOLINT
       + 7 + 1 + 1;
   EmbeddedVector<char, kBufferSize> buffer;
-  Vector<HeapGraphEdge> children = entry->children();
   int buffer_pos = 0;
-  buffer[buffer_pos++] = '\n';
-  buffer[buffer_pos++] = ',';
-  buffer_pos = itoa(entry->type(), buffer, buffer_pos);
+  if (entry_index(entry) != 0) {
+    buffer[buffer_pos++] = ',';
+  }
+  buffer_pos = utoa(entry->type(), buffer, buffer_pos);
   buffer[buffer_pos++] = ',';
-  buffer_pos = itoa(GetStringId(entry->name()), buffer, buffer_pos);
+  buffer_pos = utoa(GetStringId(entry->name()), buffer, buffer_pos);
   buffer[buffer_pos++] = ',';
-  buffer_pos = itoa(entry->id(), buffer, buffer_pos);
+  buffer_pos = utoa(entry->id(), buffer, buffer_pos);
   buffer[buffer_pos++] = ',';
-  buffer_pos = itoa(entry->self_size(), buffer, buffer_pos);
+  buffer_pos = utoa(entry->self_size(), buffer, buffer_pos);
   buffer[buffer_pos++] = ',';
-  buffer_pos = itoa(entry->retained_size(), buffer, buffer_pos);
+  buffer_pos = utoa(entry->retained_size(), buffer, buffer_pos);
   buffer[buffer_pos++] = ',';
-  buffer_pos = itoa(entry->dominator()->entry_index(), buffer, buffer_pos);
+  buffer_pos = utoa(entry_index(entry->dominator()), buffer, buffer_pos);
   buffer[buffer_pos++] = ',';
-  buffer_pos = itoa(children.length(), buffer, buffer_pos);
+  buffer_pos = utoa(edges_index, buffer, buffer_pos);
+  buffer[buffer_pos++] = '\n';
   buffer[buffer_pos++] = '\0';
   writer_->AddString(buffer.start());
-  for (int i = 0; i < children.length(); ++i) {
-    SerializeEdge(&children[i]);
-    if (writer_->aborted()) return;
-  }
 }
 
 
-void HeapSnapshotJSONSerializer::SerializeNodes() {
-  // The first (zero) item of nodes array is an object describing node
-  // serialization layout.  We use a set of macros to improve
-  // readability.
-#define JSON_A(s) "[" s "]"
-#define JSON_O(s) "{" s "}"
-#define JSON_S(s) "\"" s "\""
-  writer_->AddString(JSON_O(
-    JSON_S("fields") ":" JSON_A(
-        JSON_S("type")
-        "," JSON_S("name")
-        "," JSON_S("id")
-        "," JSON_S("self_size")
-        "," JSON_S("retained_size")
-        "," JSON_S("dominator")
-        "," JSON_S("children_count")
-        "," JSON_S("children"))
-    "," JSON_S("types") ":" JSON_A(
-        JSON_A(
-            JSON_S("hidden")
-            "," JSON_S("array")
-            "," JSON_S("string")
-            "," JSON_S("object")
-            "," JSON_S("code")
-            "," JSON_S("closure")
-            "," JSON_S("regexp")
-            "," JSON_S("number")
-            "," JSON_S("native")
-            "," JSON_S("synthetic"))
-        "," JSON_S("string")
-        "," JSON_S("number")
-        "," JSON_S("number")
-        "," JSON_S("number")
-        "," JSON_S("number")
-        "," JSON_S("number")
-        "," JSON_O(
-            JSON_S("fields") ":" JSON_A(
-                JSON_S("type")
-                "," JSON_S("name_or_index")
-                "," JSON_S("to_node"))
-            "," JSON_S("types") ":" JSON_A(
-                JSON_A(
-                    JSON_S("context")
-                    "," JSON_S("element")
-                    "," JSON_S("property")
-                    "," JSON_S("internal")
-                    "," JSON_S("hidden")
-                    "," JSON_S("shortcut")
-                    "," JSON_S("weak"))
-                "," JSON_S("string_or_number")
-                "," JSON_S("node"))))));
-#undef JSON_S
-#undef JSON_O
-#undef JSON_A
-
-  const int node_fields_count = 7;
-  // type,name,id,self_size,retained_size,dominator,children_count.
-  const int edge_fields_count = 3;  // type,name|index,to_node.
-
-  List<HeapEntry*>& nodes = *(snapshot_->entries());
-  // Root must be the first.
-  ASSERT(nodes.first() == snapshot_->root());
-  // Rewrite node indexes, so they refer to actual array positions. Do this
-  // only once.
-  if (nodes[0]->entry_index() == -1) {
-    // Nodes start from array index 1.
-    int index = 1;
-    for (int i = 0; i < nodes.length(); ++i) {
-      HeapEntry* node = nodes[i];
-      node->set_entry_index(index);
-      index += node_fields_count +
-          node->children().length() * edge_fields_count;
-    }
-  }
-
+void HeapSnapshotJSONSerializer::SerializeNodes(const List<HeapEntry>& nodes) {
+  int edges_index = 0;
   for (int i = 0; i < nodes.length(); ++i) {
-    SerializeNode(nodes[i]);
+    HeapEntry* entry = &nodes[i];
+    SerializeNode(entry, edges_index);
+    edges_index += entry->children().length() * kEdgeFieldsCount;
     if (writer_->aborted()) return;
   }
 }
@@ -3666,6 +3603,61 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
   writer_->AddString("\"");
   writer_->AddString(",\"uid\":");
   writer_->AddNumber(snapshot_->uid());
+  writer_->AddString(",\"meta\":");
+  // The object describing node serialization layout.
+  // We use a set of macros to improve readability.
+#define JSON_A(s) "[" s "]"
+#define JSON_O(s) "{" s "}"
+#define JSON_S(s) "\"" s "\""
+  writer_->AddString(JSON_O(
+    JSON_S("node_fields") ":" JSON_A(
+        JSON_S("type") ","
+        JSON_S("name") ","
+        JSON_S("id") ","
+        JSON_S("self_size") ","
+        JSON_S("retained_size") ","
+        JSON_S("dominator") ","
+        JSON_S("edges_index")) ","
+    JSON_S("node_types") ":" JSON_A(
+        JSON_A(
+            JSON_S("hidden") ","
+            JSON_S("array") ","
+            JSON_S("string") ","
+            JSON_S("object") ","
+            JSON_S("code") ","
+            JSON_S("closure") ","
+            JSON_S("regexp") ","
+            JSON_S("number") ","
+            JSON_S("native") ","
+            JSON_S("synthetic")) ","
+        JSON_S("string") ","
+        JSON_S("number") ","
+        JSON_S("number") ","
+        JSON_S("number") ","
+        JSON_S("number") ","
+        JSON_S("number")) ","
+    JSON_S("edge_fields") ":" JSON_A(
+        JSON_S("type") ","
+        JSON_S("name_or_index") ","
+        JSON_S("to_node")) ","
+    JSON_S("edge_types") ":" JSON_A(
+        JSON_A(
+            JSON_S("context") ","
+            JSON_S("element") ","
+            JSON_S("property") ","
+            JSON_S("internal") ","
+            JSON_S("hidden") ","
+            JSON_S("shortcut") ","
+            JSON_S("weak")) ","
+        JSON_S("string_or_number") ","
+        JSON_S("node"))));
+#undef JSON_S
+#undef JSON_O
+#undef JSON_A
+  writer_->AddString(",\"node_count\":");
+  writer_->AddNumber(snapshot_->entries().length());
+  writer_->AddString(",\"edge_count\":");
+  writer_->AddNumber(snapshot_->edges().length());
 }
 
 
index 1fa647e..92896c2 100644
@@ -446,6 +446,7 @@ class ProfileGenerator {
 
 
 class HeapEntry;
+class HeapSnapshot;
 
 class HeapGraphEdge BASE_EMBEDDED {
  public:
@@ -460,60 +461,45 @@ class HeapGraphEdge BASE_EMBEDDED {
   };
 
   HeapGraphEdge() { }
-  void Init(int child_index, Type type, const char* name, HeapEntry* to);
-  void Init(int child_index, Type type, int index, HeapEntry* to);
-  void Init(int child_index, int index, HeapEntry* to);
+  HeapGraphEdge(Type type, const char* name, int from, int to);
+  HeapGraphEdge(Type type, int index, int from, int to);
+  void ReplaceToIndexWithEntry(HeapSnapshot* snapshot);
 
-  Type type() { return static_cast<Type>(type_); }
-  int index() {
+  Type type() const { return static_cast<Type>(type_); }
+  int index() const {
     ASSERT(type_ == kElement || type_ == kHidden || type_ == kWeak);
     return index_;
   }
-  const char* name() {
+  const char* name() const {
     ASSERT(type_ == kContextVariable
-           || type_ == kProperty
-           || type_ == kInternal
-           || type_ == kShortcut);
+        || type_ == kProperty
+        || type_ == kInternal
+        || type_ == kShortcut);
     return name_;
   }
-  HeapEntry* to() { return to_; }
-
-  HeapEntry* From();
+  INLINE(HeapEntry* from() const);
+  HeapEntry* to() const { return to_entry_; }
 
  private:
-  int child_index_ : 29;
+  INLINE(HeapSnapshot* snapshot() const);
+
   unsigned type_ : 3;
+  int from_index_ : 29;
+  union {
+    // While the entries are being populated, |to_index_| stores the index;
+    // afterwards it is replaced with a pointer to the entry.
+    int to_index_;
+    HeapEntry* to_entry_;
+  };
   union {
     int index_;
     const char* name_;
   };
-  HeapEntry* to_;
-
-  DISALLOW_COPY_AND_ASSIGN(HeapGraphEdge);
 };
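
// Illustrative sketch of the to_index_/to_entry_ union above: while the
// snapshot is being populated the entries list may still grow and
// relocate, so an edge stores a stable integer index; once the list is
// final, ReplaceToIndexWithEntry() swaps the index for the entry pointer
// in place. Names below are invented stand-ins.
#include <vector>

struct DemoEntry { int self_size; };

struct DemoEdge {
  union {
    int to_index;         // Valid during population.
    DemoEntry* to_entry;  // Valid after ReplaceToIndexWithEntry().
  };
  void ReplaceToIndexWithEntry(std::vector<DemoEntry>* entries) {
    to_entry = &entries->at(to_index);
  }
};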
 
 
-class HeapSnapshot;
-
 // HeapEntry instances represent an entity from the heap (or a special
-// virtual node, e.g. root). To make heap snapshots more compact,
-// HeapEntries has a special memory layout (no Vectors or Lists used):
-//
-//   +-----------------+
-//        HeapEntry
-//   +-----------------+
-//      HeapGraphEdge    |
-//           ...         } children_count
-//      HeapGraphEdge    |
-//   +-----------------+
-//      HeapGraphEdge*   |
-//           ...         } retainers_count
-//      HeapGraphEdge*   |
-//   +-----------------+
-//
-// In a HeapSnapshot, all entries are hand-allocated in a continuous array
-// of raw bytes.
-//
+// virtual node, e.g. root).
 class HeapEntry BASE_EMBEDDED {
  public:
   enum Type {
@@ -528,15 +514,14 @@ class HeapEntry BASE_EMBEDDED {
     kNative = v8::HeapGraphNode::kNative,
     kSynthetic = v8::HeapGraphNode::kSynthetic
   };
+  static const int kNoEntry;
 
   HeapEntry() { }
-  void Init(HeapSnapshot* snapshot,
+  HeapEntry(HeapSnapshot* snapshot,
             Type type,
             const char* name,
             SnapshotObjectId id,
-            int self_size,
-            int children_count,
-            int retainers_count);
+            int self_size);
 
   HeapSnapshot* snapshot() { return snapshot_; }
   Type type() { return static_cast<Type>(type_); }
@@ -546,75 +531,65 @@ class HeapEntry BASE_EMBEDDED {
   int self_size() { return self_size_; }
   int retained_size() { return retained_size_; }
   void add_retained_size(int size) { retained_size_ += size; }
-  void set_retained_size(int value) { retained_size_ = value; }
-  int ordered_index() { return ordered_index_; }
-  void set_ordered_index(int value) { ordered_index_ = value; }
-  int entry_index() { return entry_index_; }
-  void set_entry_index(int value) { entry_index_ = value; }
-
-  Vector<HeapGraphEdge> children() {
-    return Vector<HeapGraphEdge>(children_arr(), children_count_); }
+  void set_retained_size(int size) { retained_size_ = size; }
+  INLINE(int index() const);
+  int postorder_index() { return postorder_index_; }
+  void set_postorder_index(int value) { postorder_index_ = value; }
+  int children_count() const { return children_count_; }
+  INLINE(int set_children_index(int index));
+  INLINE(int set_retainers_index(int index));
+  void add_child(HeapGraphEdge* edge) {
+    children_arr()[children_count_++] = edge;
+  }
+  void add_retainer(HeapGraphEdge* edge) {
+    retainers_arr()[retainers_count_++] = edge;
+  }
+  Vector<HeapGraphEdge*> children() {
+    return Vector<HeapGraphEdge*>(children_arr(), children_count_);
+  }
   Vector<HeapGraphEdge*> retainers() {
     return Vector<HeapGraphEdge*>(retainers_arr(), retainers_count_);
   }
-  HeapEntry* dominator() { return dominator_; }
+  INLINE(HeapEntry* dominator() const);
   void set_dominator(HeapEntry* entry) {
     ASSERT(entry != NULL);
-    dominator_ = entry;
+    dominator_ = entry->index();
   }
   void clear_paint() { painted_ = false; }
   bool painted() { return painted_; }
   void paint() { painted_ = true; }
+  bool user_reachable() { return user_reachable_; }
+  void set_user_reachable() { user_reachable_ = true; }
 
-  void SetIndexedReference(HeapGraphEdge::Type type,
-                           int child_index,
-                           int index,
-                           HeapEntry* entry,
-                           int retainer_index);
-  void SetNamedReference(HeapGraphEdge::Type type,
-                         int child_index,
-                         const char* name,
-                         HeapEntry* entry,
-                         int retainer_index);
-  void SetUnidirElementReference(int child_index, int index, HeapEntry* entry);
-
-  size_t EntrySize() {
-    return EntriesSize(1, children_count_, retainers_count_);
-  }
+  void SetIndexedReference(
+      HeapGraphEdge::Type type, int index, HeapEntry* entry);
+  void SetNamedReference(
+      HeapGraphEdge::Type type, const char* name, HeapEntry* entry);
 
   void Print(
       const char* prefix, const char* edge_name, int max_depth, int indent);
 
   Handle<HeapObject> GetHeapObject();
 
-  static size_t EntriesSize(int entries_count,
-                            int children_count,
-                            int retainers_count);
-
  private:
-  HeapGraphEdge* children_arr() {
-    return reinterpret_cast<HeapGraphEdge*>(this + 1);
-  }
-  HeapGraphEdge** retainers_arr() {
-    return reinterpret_cast<HeapGraphEdge**>(children_arr() + children_count_);
-  }
+  INLINE(HeapGraphEdge** children_arr());
+  INLINE(HeapGraphEdge** retainers_arr());
   const char* TypeAsString();
 
   unsigned painted_: 1;
+  unsigned user_reachable_: 1;
+  int dominator_: 30;
   unsigned type_: 4;
-  int children_count_: 27;
-  int retainers_count_;
+  int retainers_count_: 28;
+  int retainers_index_;
+  int children_count_;
+  int children_index_;
   int self_size_;
   union {
-    int ordered_index_;  // Used during dominator tree building.
-    int retained_size_;  // At that moment, there is no retained size yet.
+    int postorder_index_;  // Used during dominator tree building.
+    int retained_size_;    // At that moment, there is no retained size yet.
   };
-  int entry_index_;
   SnapshotObjectId id_;
-  HeapEntry* dominator_;
   HeapSnapshot* snapshot_;
   const char* name_;
-
-  DISALLOW_COPY_AND_ASSIGN(HeapEntry);
 };
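
// Standalone sketch (invented names) of the two-phase fill implied by
// set_children_index() and add_child() above: a prefix sum over per-entry
// edge counts reserves each entry's slice of one shared children array,
// then every edge is dropped into its source entry's slice.
#include <cstddef>
#include <vector>

struct DemoEdge { int from; int to; };

static void DemoFillChildren(const std::vector<DemoEdge>& edges,
                             int entry_count,
                             std::vector<int>* children_index,
                             std::vector<const DemoEdge*>* children) {
  std::vector<int> counts(entry_count, 0);
  for (size_t i = 0; i < edges.size(); ++i) ++counts[edges[i].from];
  children_index->assign(entry_count, 0);
  int offset = 0;
  for (int i = 0; i < entry_count; ++i) {  // Prefix sum of edge counts.
    (*children_index)[i] = offset;
    offset += counts[i];
  }
  children->clear();
  children->resize(edges.size());  // Slots value-initialized to NULL.
  std::vector<int> cursor(*children_index);
  for (size_t i = 0; i < edges.size(); ++i) {  // Distribute into slices.
    (*children)[cursor[edges[i].from]++] = &edges[i];
  }
}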
 
 
@@ -635,61 +610,59 @@ class HeapSnapshot {
                Type type,
                const char* title,
                unsigned uid);
-  ~HeapSnapshot();
   void Delete();
 
   HeapSnapshotsCollection* collection() { return collection_; }
   Type type() { return type_; }
   const char* title() { return title_; }
   unsigned uid() { return uid_; }
-  HeapEntry* root() { return root_entry_; }
-  HeapEntry* gc_roots() { return gc_roots_entry_; }
-  HeapEntry* natives_root() { return natives_root_entry_; }
-  HeapEntry* gc_subroot(int index) { return gc_subroot_entries_[index]; }
-  List<HeapEntry*>* entries() { return &entries_; }
-  size_t raw_entries_size() { return raw_entries_size_; }
+  size_t RawSnapshotSize() const;
+  HeapEntry* root() { return &entries_[root_index_]; }
+  HeapEntry* gc_roots() { return &entries_[gc_roots_index_]; }
+  HeapEntry* natives_root() { return &entries_[natives_root_index_]; }
+  HeapEntry* gc_subroot(int index) {
+    return &entries_[gc_subroot_indexes_[index]];
+  }
+  List<HeapEntry>& entries() { return entries_; }
+  List<HeapGraphEdge>& edges() { return edges_; }
+  List<HeapGraphEdge*>& children() { return children_; }
+  List<HeapGraphEdge*>& retainers() { return retainers_; }
   void RememberLastJSObjectId();
   SnapshotObjectId max_snapshot_js_object_id() const {
     return max_snapshot_js_object_id_;
   }
 
-  void AllocateEntries(
-      int entries_count, int children_count, int retainers_count);
   HeapEntry* AddEntry(HeapEntry::Type type,
                       const char* name,
                       SnapshotObjectId id,
-                      int size,
-                      int children_count,
-                      int retainers_count);
-  HeapEntry* AddRootEntry(int children_count);
-  HeapEntry* AddGcRootsEntry(int children_count, int retainers_count);
-  HeapEntry* AddGcSubrootEntry(int tag,
-                               int children_count,
-                               int retainers_count);
-  HeapEntry* AddNativesRootEntry(int children_count, int retainers_count);
+                      int size);
+  HeapEntry* AddRootEntry();
+  HeapEntry* AddGcRootsEntry();
+  HeapEntry* AddGcSubrootEntry(int tag);
+  HeapEntry* AddNativesRootEntry();
   void ClearPaint();
   HeapEntry* GetEntryById(SnapshotObjectId id);
   List<HeapEntry*>* GetSortedEntriesList();
   void SetDominatorsToSelf();
+  void FillChildrenAndRetainers();
 
   void Print(int max_depth);
   void PrintEntriesSize();
 
  private:
-  HeapEntry* GetNextEntryToInit();
-
   HeapSnapshotsCollection* collection_;
   Type type_;
   const char* title_;
   unsigned uid_;
-  HeapEntry* root_entry_;
-  HeapEntry* gc_roots_entry_;
-  HeapEntry* natives_root_entry_;
-  HeapEntry* gc_subroot_entries_[VisitorSynchronization::kNumberOfSyncTags];
-  char* raw_entries_;
-  List<HeapEntry*> entries_;
+  int root_index_;
+  int gc_roots_index_;
+  int natives_root_index_;
+  int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags];
+  List<HeapEntry> entries_;
+  List<HeapGraphEdge> edges_;
+  List<HeapGraphEdge*> children_;
+  List<HeapGraphEdge*> retainers_;
   List<HeapEntry*> sorted_entries_;
-  size_t raw_entries_size_;
   SnapshotObjectId max_snapshot_js_object_id_;
 
   friend class HeapSnapshotTester;
@@ -701,15 +674,18 @@ class HeapSnapshot {
 class HeapObjectsMap {
  public:
   HeapObjectsMap();
-  ~HeapObjectsMap();
 
   void SnapshotGenerationFinished();
-  SnapshotObjectId FindObject(Address addr);
+  SnapshotObjectId FindEntry(Address addr);
+  SnapshotObjectId FindOrAddEntry(Address addr, unsigned int size);
   void MoveObject(Address from, Address to);
   SnapshotObjectId last_assigned_id() const {
     return next_id_ - kObjectIdStep;
   }
 
+  void StopHeapObjectsTracking();
+  void PushHeapObjectsStats(OutputStream* stream);
+
   static SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
   static inline SnapshotObjectId GetNthGcSubrootId(int delta);
 
@@ -722,16 +698,23 @@ class HeapObjectsMap {
 
  private:
   struct EntryInfo {
-    explicit EntryInfo(SnapshotObjectId id) : id(id), accessed(true) { }
-    EntryInfo(SnapshotObjectId id, bool accessed)
-      : id(id),
-        accessed(accessed) { }
+    EntryInfo(SnapshotObjectId id, Address addr, unsigned int size)
+        : id(id), addr(addr), size(size), accessed(true) { }
+    EntryInfo(SnapshotObjectId id, Address addr, unsigned int size,
+              bool accessed)
+        : id(id), addr(addr), size(size), accessed(accessed) { }
     SnapshotObjectId id;
+    Address addr;
+    unsigned int size;
     bool accessed;
   };
+  struct TimeInterval {
+    explicit TimeInterval(SnapshotObjectId id) : id(id), size(0), count(0) { }
+    SnapshotObjectId id;
+    uint32_t size;
+    uint32_t count;
+  };
 
-  void AddEntry(Address addr, SnapshotObjectId id);
-  SnapshotObjectId FindEntry(Address addr);
+  void UpdateHeapObjectsMap();
   void RemoveDeadEntries();
 
   static bool AddressesMatch(void* key1, void* key2) {
@@ -744,10 +727,10 @@ class HeapObjectsMap {
         v8::internal::kZeroHashSeed);
   }
 
-  bool initial_fill_mode_;
   SnapshotObjectId next_id_;
   HashMap entries_map_;
-  List<EntryInfo>* entries_;
+  List<EntryInfo> entries_;
+  List<TimeInterval> time_intervals_;
 
   DISALLOW_COPY_AND_ASSIGN(HeapObjectsMap);
 };
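
// Standalone sketch (std::map instead of V8's HashMap, invented id step)
// of the address-to-id bookkeeping above: FindOrAddEntry hands out a
// stable SnapshotObjectId per object, and MoveObject re-keys the entry
// when the GC moves the object, so the id survives compaction.
#include <map>

typedef unsigned long DemoAddress;
typedef unsigned DemoSnapshotObjectId;

class DemoObjectsMap {
 public:
  DemoObjectsMap() : next_id_(2) {}
  DemoSnapshotObjectId FindOrAddEntry(DemoAddress addr) {
    std::map<DemoAddress, DemoSnapshotObjectId>::iterator it =
        entries_.find(addr);
    if (it != entries_.end()) return it->second;
    DemoSnapshotObjectId id = next_id_;
    next_id_ += 2;  // Assumed step; leaves room for synthetic ids.
    entries_[addr] = id;
    return id;
  }
  void MoveObject(DemoAddress from, DemoAddress to) {
    std::map<DemoAddress, DemoSnapshotObjectId>::iterator it =
        entries_.find(from);
    if (it == entries_.end()) return;
    DemoSnapshotObjectId id = it->second;
    entries_.erase(it);
    entries_[to] = id;  // Same object keeps its id at the new address.
  }

 private:
  DemoSnapshotObjectId next_id_;
  std::map<DemoAddress, DemoSnapshotObjectId> entries_;
};
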
@@ -759,6 +742,11 @@ class HeapSnapshotsCollection {
   ~HeapSnapshotsCollection();
 
   bool is_tracking_objects() { return is_tracking_objects_; }
+  void PushHeapObjectsStats(OutputStream* stream) {
+    return ids_.PushHeapObjectsStats(stream);
+  }
+  void StartHeapObjectsTracking() { is_tracking_objects_ = true; }
+  void StopHeapObjectsTracking() { ids_.StopHeapObjectsTracking(); }
 
   HeapSnapshot* NewSnapshot(
       HeapSnapshot::Type type, const char* name, unsigned uid);
@@ -770,7 +758,12 @@ class HeapSnapshotsCollection {
   StringsStorage* names() { return &names_; }
   TokenEnumerator* token_enumerator() { return token_enumerator_; }
 
-  SnapshotObjectId GetObjectId(Address addr) { return ids_.FindObject(addr); }
+  SnapshotObjectId FindObjectId(Address object_addr) {
+    return ids_.FindEntry(object_addr);
+  }
+  SnapshotObjectId GetObjectId(Address object_addr, int object_size) {
+    return ids_.FindOrAddEntry(object_addr, object_size);
+  }
   Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
   void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
   SnapshotObjectId last_assigned_id() const {
@@ -804,8 +797,7 @@ typedef void* HeapThing;
 class HeapEntriesAllocator {
  public:
   virtual ~HeapEntriesAllocator() { }
-  virtual HeapEntry* AllocateEntry(
-      HeapThing ptr, int children_count, int retainers_count) = 0;
+  virtual HeapEntry* AllocateEntry(HeapThing ptr) = 0;
 };
 
 
@@ -814,37 +806,11 @@ class HeapEntriesAllocator {
 class HeapEntriesMap {
  public:
   HeapEntriesMap();
-  ~HeapEntriesMap();
 
-  void AllocateEntries(HeapThing root_object);
-  HeapEntry* Map(HeapThing thing);
-  void Pair(HeapThing thing, HeapEntriesAllocator* allocator, HeapEntry* entry);
-  void CountReference(HeapThing from, HeapThing to,
-                      int* prev_children_count = NULL,
-                      int* prev_retainers_count = NULL);
-
-  int entries_count() { return entries_count_; }
-  int total_children_count() { return total_children_count_; }
-  int total_retainers_count() { return total_retainers_count_; }
-
-  static HeapEntry* const kHeapEntryPlaceholder;
+  int Map(HeapThing thing);
+  void Pair(HeapThing thing, int entry);
 
  private:
-  struct EntryInfo {
-    EntryInfo(HeapEntry* entry, HeapEntriesAllocator* allocator)
-        : entry(entry),
-          allocator(allocator),
-          children_count(0),
-          retainers_count(0) {
-    }
-    HeapEntry* entry;
-    HeapEntriesAllocator* allocator;
-    int children_count;
-    int retainers_count;
-  };
-
-  static inline void AllocateHeapEntryForMapEntry(HashMap::Entry* map_entry);
-
   static uint32_t Hash(HeapThing thing) {
     return ComputeIntegerHash(
         static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)),
@@ -855,9 +821,6 @@ class HeapEntriesMap {
   }
 
   HashMap entries_;
-  int entries_count_;
-  int total_children_count_;
-  int total_retainers_count_;
 
   friend class HeapObjectsSet;
 
@@ -873,6 +836,7 @@ class HeapObjectsSet {
   void Insert(Object* obj);
   const char* GetTag(Object* obj);
   void SetTag(Object* obj, const char* tag);
+  bool is_empty() const { return entries_.occupancy() == 0; }
 
  private:
   HashMap entries_;
@@ -891,26 +855,18 @@ class SnapshotFillerInterface {
   virtual HeapEntry* FindOrAddEntry(HeapThing ptr,
                                     HeapEntriesAllocator* allocator) = 0;
   virtual void SetIndexedReference(HeapGraphEdge::Type type,
-                                   HeapThing parent_ptr,
-                                   HeapEntry* parent_entry,
+                                   int parent_entry,
                                    int index,
-                                   HeapThing child_ptr,
                                    HeapEntry* child_entry) = 0;
   virtual void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
-                                            HeapThing parent_ptr,
-                                            HeapEntry* parent_entry,
-                                            HeapThing child_ptr,
+                                            int parent_entry,
                                             HeapEntry* child_entry) = 0;
   virtual void SetNamedReference(HeapGraphEdge::Type type,
-                                 HeapThing parent_ptr,
-                                 HeapEntry* parent_entry,
+                                 int parent_entry,
                                  const char* reference_name,
-                                 HeapThing child_ptr,
                                  HeapEntry* child_entry) = 0;
   virtual void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
-                                          HeapThing parent_ptr,
-                                          HeapEntry* parent_entry,
-                                          HeapThing child_ptr,
+                                          int parent_entry,
                                           HeapEntry* child_entry) = 0;
 };
 
@@ -929,12 +885,10 @@ class V8HeapExplorer : public HeapEntriesAllocator {
   V8HeapExplorer(HeapSnapshot* snapshot,
                  SnapshottingProgressReportingInterface* progress);
   virtual ~V8HeapExplorer();
-  virtual HeapEntry* AllocateEntry(
-      HeapThing ptr, int children_count, int retainers_count);
+  virtual HeapEntry* AllocateEntry(HeapThing ptr);
   void AddRootEntries(SnapshotFillerInterface* filler);
   int EstimateObjectsCount(HeapIterator* iterator);
   bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
-  bool IterateAndSetObjectNames(SnapshotFillerInterface* filler);
   void TagGlobalObjects();
 
   static String* GetConstructorName(JSObject* object);
@@ -942,66 +896,77 @@ class V8HeapExplorer : public HeapEntriesAllocator {
   static HeapObject* const kInternalRootObject;
 
  private:
-  HeapEntry* AddEntry(
-      HeapObject* object, int children_count, int retainers_count);
+  HeapEntry* AddEntry(HeapObject* object);
   HeapEntry* AddEntry(HeapObject* object,
                       HeapEntry::Type type,
-                      const char* name,
-                      int children_count,
-                      int retainers_count);
+                      const char* name);
   const char* GetSystemEntryName(HeapObject* object);
+
   void ExtractReferences(HeapObject* obj);
-  void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
-  void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
-  void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry);
-  void ExtractInternalReferences(JSObject* js_obj, HeapEntry* entry);
+  void ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy);
+  void ExtractJSObjectReferences(int entry, JSObject* js_obj);
+  void ExtractStringReferences(int entry, String* obj);
+  void ExtractContextReferences(int entry, Context* context);
+  void ExtractMapReferences(int entry, Map* map);
+  void ExtractSharedFunctionInfoReferences(int entry,
+                                           SharedFunctionInfo* shared);
+  void ExtractScriptReferences(int entry, Script* script);
+  void ExtractCodeCacheReferences(int entry, CodeCache* code_cache);
+  void ExtractCodeReferences(int entry, Code* code);
+  void ExtractJSGlobalPropertyCellReferences(int entry,
+                                             JSGlobalPropertyCell* cell);
+  void ExtractClosureReferences(JSObject* js_obj, int entry);
+  void ExtractPropertyReferences(JSObject* js_obj, int entry);
+  void ExtractElementReferences(JSObject* js_obj, int entry);
+  void ExtractInternalReferences(JSObject* js_obj, int entry);
+  bool IsEssentialObject(Object* object);
   void SetClosureReference(HeapObject* parent_obj,
-                           HeapEntry* parent,
+                           int parent,
                            String* reference_name,
                            Object* child);
   void SetNativeBindReference(HeapObject* parent_obj,
-                              HeapEntry* parent,
+                              int parent,
                               const char* reference_name,
                               Object* child);
   void SetElementReference(HeapObject* parent_obj,
-                           HeapEntry* parent,
+                           int parent,
                            int index,
                            Object* child);
   void SetInternalReference(HeapObject* parent_obj,
-                            HeapEntry* parent,
+                            int parent,
                             const char* reference_name,
                             Object* child,
                             int field_offset = -1);
   void SetInternalReference(HeapObject* parent_obj,
-                            HeapEntry* parent,
+                            int parent,
                             int index,
                             Object* child,
                             int field_offset = -1);
   void SetHiddenReference(HeapObject* parent_obj,
-                          HeapEntry* parent,
+                          int parent,
                           int index,
                           Object* child);
   void SetWeakReference(HeapObject* parent_obj,
-                        HeapEntry* parent_entry,
+                        int parent,
                         int index,
                         Object* child_obj,
                         int field_offset);
   void SetPropertyReference(HeapObject* parent_obj,
-                            HeapEntry* parent,
+                            int parent,
                             String* reference_name,
                             Object* child,
                             const char* name_format_string = NULL,
                             int field_offset = -1);
   void SetPropertyShortcutReference(HeapObject* parent_obj,
-                                    HeapEntry* parent,
+                                    int parent,
                                     String* reference_name,
                                     Object* child);
-  void SetRootShortcutReference(Object* child);
+  void SetUserGlobalReference(Object* user_global);
   void SetRootGcRootsReference();
   void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
   void SetGcSubrootReference(
       VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
-  void SetObjectName(HeapObject* object);
+  const char* GetStrongGcSubrootName(Object* object);
   void TagObject(Object* obj, const char* tag);
 
   HeapEntry* GetEntry(Object* obj);
@@ -1015,6 +980,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
   SnapshottingProgressReportingInterface* progress_;
   SnapshotFillerInterface* filler_;
   HeapObjectsSet objects_tags_;
+  HeapObjectsSet strong_gc_subroot_names_;
 
   static HeapObject* const kGcRootsObject;
   static HeapObject* const kFirstGcSubrootObject;
@@ -1098,9 +1064,10 @@ class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
   bool BuildDominatorTree(const Vector<HeapEntry*>& entries,
                           Vector<int>* dominators);
   bool CalculateRetainedSizes();
-  bool CountEntriesAndReferences();
   bool FillReferences();
-  void FillReversePostorderIndexes(Vector<HeapEntry*>* entries);
+  void FillPostorderIndexes(Vector<HeapEntry*>* entries);
+  bool IsUserGlobalReference(const HeapGraphEdge* edge);
+  void MarkUserReachableObjects();
   void ProgressStep();
   bool ProgressReport(bool force = false);
   bool SetEntriesDominators();
@@ -1145,16 +1112,19 @@ class HeapSnapshotJSONSerializer {
 
   HeapSnapshot* CreateFakeSnapshot();
   int GetStringId(const char* s);
-  void SerializeEdge(HeapGraphEdge* edge);
+  int entry_index(HeapEntry* e) { return e->index() * kNodeFieldsCount; }
+  void SerializeEdge(HeapGraphEdge* edge, bool first_edge);
+  void SerializeEdges(const List<HeapEntry>& nodes);
   void SerializeImpl();
-  void SerializeNode(HeapEntry* entry);
-  void SerializeNodes();
+  void SerializeNode(HeapEntry* entry, int edges_index);
+  void SerializeNodes(const List<HeapEntry>& nodes);
   void SerializeSnapshot();
   void SerializeString(const unsigned char* s);
   void SerializeStrings();
   void SortHashMap(HashMap* map, List<HashMap::Entry*>* sorted_entries);
 
-  static const int kMaxSerializableSnapshotRawSize;
+  static const int kEdgeFieldsCount;
+  static const int kNodeFieldsCount;
 
   HeapSnapshot* snapshot_;
   HashMap strings_;
index 04f78b2..ba5e3c8 100644 (file)
@@ -214,13 +214,6 @@ class LookupResult BASE_EMBEDDED {
     number_ = number;
   }
 
-  void DescriptorResult(JSObject* holder, Smi* details, int number) {
-    lookup_type_ = DESCRIPTOR_TYPE;
-    holder_ = holder;
-    details_ = PropertyDetails(details);
-    number_ = number;
-  }
-
   void ConstantResult(JSObject* holder) {
     lookup_type_ = CONSTANT_TYPE;
     holder_ = holder;
index bc9508d..a574f62 100644 (file)
@@ -278,11 +278,7 @@ function TrimRegExp(regexp) {
 
 
 function RegExpToString() {
-  // If this.source is an empty string, output /(?:)/.
-  // http://bugzilla.mozilla.org/show_bug.cgi?id=225550
-  // ecma_2/RegExp/properties-001.js.
-  var src = this.source ? this.source : '(?:)';
-  var result = '/' + src + '/';
+  var result = '/' + this.source + '/';
   if (this.global) result += 'g';
   if (this.ignoreCase) result += 'i';
   if (this.multiline) result += 'm';
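
The dropped special case is not lost: a later hunk in this patch makes
Runtime_RegExpInitializeObject substitute "(?:)" for an empty source at
construction time, so toString() can concatenate unconditionally. A minimal
JavaScript sketch of the observable behavior after the patch:

    var re = new RegExp("");
    re.source;      // "(?:)" -- normalized when the regexp is initialized
    re.toString();  // "/(?:)/" -- plain concatenation now suffices
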
@@ -296,7 +292,7 @@ function RegExpToString() {
 // of the last successful match.
 function RegExpGetLastMatch() {
   if (lastMatchInfoOverride !== null) {
-    return lastMatchInfoOverride[0];
+    return OVERRIDE_MATCH(lastMatchInfoOverride);
   }
   var regExpSubject = LAST_SUBJECT(lastMatchInfo);
   return SubString(regExpSubject,
@@ -334,8 +330,8 @@ function RegExpGetLeftContext() {
     subject = LAST_SUBJECT(lastMatchInfo);
   } else {
     var override = lastMatchInfoOverride;
-    start_index = override[override.length - 2];
-    subject = override[override.length - 1];
+    start_index = OVERRIDE_POS(override);
+    subject = OVERRIDE_SUBJECT(override);
   }
   return SubString(subject, 0, start_index);
 }
@@ -349,8 +345,9 @@ function RegExpGetRightContext() {
     subject = LAST_SUBJECT(lastMatchInfo);
   } else {
     var override = lastMatchInfoOverride;
-    subject = override[override.length - 1];
-    start_index = override[override.length - 2] + subject.length;
+    subject = OVERRIDE_SUBJECT(override);
+    var match = OVERRIDE_MATCH(override);
+    start_index = OVERRIDE_POS(override) + match.length;
   }
   return SubString(subject, start_index, subject.length);
 }
@@ -362,7 +359,9 @@ function RegExpGetRightContext() {
 function RegExpMakeCaptureGetter(n) {
   return function() {
     if (lastMatchInfoOverride) {
-      if (n < lastMatchInfoOverride.length - 2) return lastMatchInfoOverride[n];
+      if (n < lastMatchInfoOverride.length - 2) {
+        return OVERRIDE_CAPTURE(lastMatchInfoOverride, n);
+      }
       return '';
     }
     var index = n * 2;
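
The OVERRIDE_MATCH, OVERRIDE_POS, OVERRIDE_SUBJECT and OVERRIDE_CAPTURE
macros (defined outside this excerpt) replace the fragile index arithmetic
on lastMatchInfoOverride. The legacy RegExp statics these getters back
behave as in this sketch:

    "one two".replace(/two/, "2");
    RegExp.lastMatch;      // "two"
    RegExp.leftContext;    // "one "
    RegExp.rightContext;   // ""
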
index 2794fac..a42f90c 100644 (file)
@@ -1272,112 +1272,96 @@ static Failure* ThrowRedeclarationError(Isolate* isolate,
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
   ASSERT(args.length() == 3);
   HandleScope scope(isolate);
+  Handle<GlobalObject> global = Handle<GlobalObject>(
+      isolate->context()->global());
 
   Handle<Context> context = args.at<Context>(0);
   CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 1);
   CONVERT_SMI_ARG_CHECKED(flags, 2);
 
-  Handle<JSObject> js_global = Handle<JSObject>(isolate->context()->global());
-  Handle<JSObject> qml_global = Handle<JSObject>(isolate->context()->qml_global());
-
   // Traverse the name/value pairs and set the properties.
   int length = pairs->length();
-  for (int i = 0; i < length; i += 3) {
+  for (int i = 0; i < length; i += 2) {
     HandleScope scope(isolate);
     Handle<String> name(String::cast(pairs->get(i)));
     Handle<Object> value(pairs->get(i + 1), isolate);
-    Handle<Smi> is_qml_global(Smi::cast(pairs->get(i + 2)));
-
-    Handle<JSObject> global = is_qml_global->value()?qml_global:js_global;
 
     // We have to declare a global const property. Because we only assign
     // to it when the assignment in "const x = <expr>" is evaluated, the
     // initial value is the hole.
-    bool is_const_property = value->IsTheHole();
-    bool is_function_declaration = false;
-    if (value->IsUndefined() || is_const_property) {
+    bool is_var = value->IsUndefined();
+    bool is_const = value->IsTheHole();
+    bool is_function = value->IsSharedFunctionInfo();
+    bool is_module = value->IsJSModule();
+    ASSERT(is_var + is_const + is_function + is_module == 1);
+
+    if (is_var || is_const) {
       // Lookup the property in the global object, and don't set the
       // value of the variable if the property is already there.
+      // Do the lookup locally only, see ES5 errata.
       LookupResult lookup(isolate);
-      global->Lookup(*name, &lookup, true);
+      if (FLAG_es52_globals)
+        global->LocalLookup(*name, &lookup);
+      else
+        global->Lookup(*name, &lookup);
       if (lookup.IsProperty()) {
         // We found an existing property. Unless it was an interceptor
         // that claims the property is absent, skip this declaration.
-        if (lookup.type() != INTERCEPTOR) {
-          continue;
-        }
+        if (lookup.type() != INTERCEPTOR) continue;
         PropertyAttributes attributes = global->GetPropertyAttribute(*name);
-        if (attributes != ABSENT) {
-          continue;
-        }
+        if (attributes != ABSENT) continue;
         // Fall-through and introduce the absent property by using
         // SetProperty.
       }
-    } else {
-      is_function_declaration = true;
+    } else if (is_function) {
       // Copy the function and update its context. Use it as value.
       Handle<SharedFunctionInfo> shared =
           Handle<SharedFunctionInfo>::cast(value);
       Handle<JSFunction> function =
-          isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
-                                                                context,
-                                                                TENURED);
+          isolate->factory()->NewFunctionFromSharedFunctionInfo(
+              shared, context, TENURED);
       value = function;
     }
 
     LookupResult lookup(isolate);
-    global->LocalLookup(*name, &lookup, true);
+    global->LocalLookup(*name, &lookup);
 
-    // Compute the property attributes. According to ECMA-262, section
-    // 13, page 71, the property must be read-only and
-    // non-deletable. However, neither SpiderMonkey nor KJS creates the
-    // property as read-only, so we don't either.
+    // Compute the property attributes. According to ECMA-262,
+    // the property must be non-configurable except in eval.
     int attr = NONE;
-    if (!DeclareGlobalsEvalFlag::decode(flags)) {
+    bool is_eval = DeclareGlobalsEvalFlag::decode(flags);
+    if (!is_eval || is_module) {
       attr |= DONT_DELETE;
     }
     bool is_native = DeclareGlobalsNativeFlag::decode(flags);
-    if (is_const_property || (is_native && is_function_declaration)) {
+    if (is_const || is_module || (is_native && is_function)) {
       attr |= READ_ONLY;
     }
 
     LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags);
 
-    // Safari does not allow the invocation of callback setters for
-    // function declarations. To mimic this behavior, we do not allow
-    // the invocation of setters for function values. This makes a
-    // difference for global functions with the same names as event
-    // handlers such as "function onload() {}". Firefox does call the
-    // onload setter in those case and Safari does not. We follow
-    // Safari for compatibility.
-    if (is_function_declaration) {
-      if (lookup.IsProperty() && (lookup.type() != INTERCEPTOR)) {
-        // Do not overwrite READ_ONLY properties.
-        if (lookup.GetAttributes() & READ_ONLY) {
-          if (language_mode != CLASSIC_MODE) {
-            Handle<Object> args[] = { name };
-            return isolate->Throw(*isolate->factory()->NewTypeError(
-                "strict_cannot_assign", HandleVector(args, ARRAY_SIZE(args))));
-          }
-          continue;
+    if (!lookup.IsProperty() || is_function || is_module) {
+      // If the local property exists, check that we can reconfigure it
+      // as required for function declarations.
+      if (lookup.IsProperty() && lookup.IsDontDelete()) {
+        if (lookup.IsReadOnly() || lookup.IsDontEnum() ||
+            lookup.type() == CALLBACKS) {
+          return ThrowRedeclarationError(
+              isolate, is_function ? "function" : "module", name);
         }
-        // Do not change DONT_DELETE to false from true.
-        attr |= lookup.GetAttributes() & DONT_DELETE;
+        // If the existing property is not configurable, keep its attributes.
+        attr = lookup.GetAttributes();
       }
-      PropertyAttributes attributes = static_cast<PropertyAttributes>(attr);
-
-      RETURN_IF_EMPTY_HANDLE(
-          isolate,
-          JSObject::SetLocalPropertyIgnoreAttributes(global, name, value,
-                                                     attributes));
+      // Define or redefine own property.
+      RETURN_IF_EMPTY_HANDLE(isolate,
+          JSObject::SetLocalPropertyIgnoreAttributes(
+              global, name, value, static_cast<PropertyAttributes>(attr)));
     } else {
-      RETURN_IF_EMPTY_HANDLE(
-          isolate,
-          JSReceiver::SetProperty(global, name, value,
-                                  static_cast<PropertyAttributes>(attr),
-                                  language_mode == CLASSIC_MODE
-                                      ? kNonStrictMode : kStrictMode,
-                                  true));
+      // Do a [[Put]] on the existing (own) property.
+      RETURN_IF_EMPTY_HANDLE(isolate,
+          JSObject::SetProperty(
+              global, name, value, static_cast<PropertyAttributes>(attr),
+              language_mode == CLASSIC_MODE ? kNonStrictMode : kStrictMode));
     }
   }
 
@@ -1410,6 +1394,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
 
   if (attributes != ABSENT) {
     // The name was declared before; check for conflicting re-declarations.
+    // Note: this is actually inconsistent with what happens for globals (where
+    // we silently ignore such declarations).
     if (((attributes & READ_ONLY) != 0) || (mode == READ_ONLY)) {
       // Functions are not read-only.
       ASSERT(mode != READ_ONLY || initial_value->IsTheHole());
@@ -1472,9 +1458,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
         return ThrowRedeclarationError(isolate, "const", name);
       }
     }
-    RETURN_IF_EMPTY_HANDLE(
-        isolate,
-        JSReceiver::SetProperty(object, name, value, mode, kNonStrictMode));
+    if (object->IsJSGlobalObject()) {
+      // Define own property on the global object.
+      RETURN_IF_EMPTY_HANDLE(isolate,
+         JSObject::SetLocalPropertyIgnoreAttributes(object, name, value, mode));
+    } else {
+      RETURN_IF_EMPTY_HANDLE(isolate,
+         JSReceiver::SetProperty(object, name, value, mode, kNonStrictMode));
+    }
   }
 
   return isolate->heap()->undefined_value();
@@ -1485,25 +1476,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
   NoHandleAllocation nha;
   // args[0] == name
   // args[1] == language_mode
-  // args[2] == qml_mode
-  // args[3] == value (optional)
+  // args[2] == value (optional)
 
   // Determine if we need to assign to the variable if it already
   // exists (based on the number of arguments).
-  RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
-  bool assign = args.length() == 4;
+  RUNTIME_ASSERT(args.length() == 2 || args.length() == 3);
+  bool assign = args.length() == 3;
 
   CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+  GlobalObject* global = isolate->context()->global();
   RUNTIME_ASSERT(args[1]->IsSmi());
   CONVERT_LANGUAGE_MODE_ARG(language_mode, 1);
   StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE)
       ? kNonStrictMode : kStrictMode;
 
-  RUNTIME_ASSERT(args[2]->IsSmi());
-  int qml_mode = Smi::cast(args[2])->value();
-
-  JSObject* global = qml_mode?isolate->context()->qml_global():isolate->context()->global();
-
   // According to ECMA-262, section 12.2, page 62, the property must
   // not be deletable.
   PropertyAttributes attributes = DONT_DELETE;
@@ -1520,7 +1506,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
   while (object->IsJSObject() &&
          JSObject::cast(object)->map()->is_hidden_prototype()) {
     JSObject* raw_holder = JSObject::cast(object);
-    raw_holder->LocalLookup(*name, &lookup, true);
+    raw_holder->LocalLookup(*name, &lookup);
     if (lookup.IsFound() && lookup.type() == INTERCEPTOR) {
       HandleScope handle_scope(isolate);
       Handle<JSObject> holder(raw_holder);
@@ -1531,7 +1517,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
         // Found an interceptor that's not read only.
         if (assign) {
           return raw_holder->SetProperty(
-              &lookup, *name, args[3], attributes, strict_mode_flag);
+              &lookup, *name, args[2], attributes, strict_mode_flag);
         } else {
           return isolate->heap()->undefined_value();
         }
@@ -1541,10 +1527,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
   }
 
   // Reload global in case the loop above performed a GC.
-  global = qml_mode?isolate->context()->qml_global():isolate->context()->global();
+  global = isolate->context()->global();
   if (assign) {
-    return global->SetProperty(
-        *name, args[3], attributes, strict_mode_flag, true);
+    return global->SetProperty(*name, args[2], attributes, strict_mode_flag);
   }
   return isolate->heap()->undefined_value();
 }
@@ -1554,15 +1539,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
   // All constants are declared with an initial value. The name
   // of the constant is the first argument and the initial value
   // is the second.
-  RUNTIME_ASSERT(args.length() == 3);
+  RUNTIME_ASSERT(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
   Handle<Object> value = args.at<Object>(1);
 
-  RUNTIME_ASSERT(args[2]->IsSmi());
-  int qml_mode = Smi::cast(args[2])->value();
-
   // Get the current global object from top.
-  JSObject* global = qml_mode?isolate->context()->qml_global():isolate->context()->global();
+  GlobalObject* global = isolate->context()->global();
 
   // According to ECMA-262, section 12.2, page 62, the property must
   // not be deletable. Since it's a const, it must be READ_ONLY too.
@@ -1586,7 +1568,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
     // Restore global object from context (in case of GC) and continue
     // with setting the value.
     HandleScope handle_scope(isolate);
-    Handle<JSObject> global(qml_mode?isolate->context()->qml_global():isolate->context()->global());
+    Handle<GlobalObject> global(isolate->context()->global());
 
     // BUG 1213575: Handle the case where we have to set a read-only
     // property through an interceptor and only do it if it's
@@ -1801,6 +1783,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
   ASSERT(args.length() == 5);
   CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
   CONVERT_ARG_CHECKED(String, source, 1);
+  // If source is the empty string we set it to "(?:)" instead as
+  // suggested by ECMA-262, 5th, section 15.10.4.1.
+  if (source->length() == 0) source = isolate->heap()->query_colon_symbol();
 
   Object* global = args[2];
   if (!global->IsTrue()) global = isolate->heap()->false_value();
@@ -2115,7 +2100,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
     DescriptorArray* instance_desc = function->map()->instance_descriptors();
     int index = instance_desc->Search(name);
     ASSERT(index != DescriptorArray::kNotFound);
-    PropertyDetails details(instance_desc->GetDetails(index));
+    PropertyDetails details = instance_desc->GetDetails(index);
     CallbacksDescriptor new_desc(name,
         instance_desc->GetValue(index),
         static_cast<PropertyAttributes>(details.attributes() | READ_ONLY),
@@ -2900,12 +2885,79 @@ void FindStringIndicesDispatch(Isolate* isolate,
 }
 
 
+// Two smis before and after the match, for very long strings.
+const int kMaxBuilderEntriesPerRegExpMatch = 5;
+
+
+static void SetLastMatchInfoNoCaptures(Handle<String> subject,
+                                       Handle<JSArray> last_match_info,
+                                       int match_start,
+                                       int match_end) {
+  // Fill last_match_info with a single capture.
+  last_match_info->EnsureSize(2 + RegExpImpl::kLastMatchOverhead);
+  AssertNoAllocation no_gc;
+  FixedArray* elements = FixedArray::cast(last_match_info->elements());
+  RegExpImpl::SetLastCaptureCount(elements, 2);
+  RegExpImpl::SetLastInput(elements, *subject);
+  RegExpImpl::SetLastSubject(elements, *subject);
+  RegExpImpl::SetCapture(elements, 0, match_start);
+  RegExpImpl::SetCapture(elements, 1, match_end);
+}
+
+
+template <typename SubjectChar, typename PatternChar>
+static bool SearchStringMultiple(Isolate* isolate,
+                                 Vector<const SubjectChar> subject,
+                                 Vector<const PatternChar> pattern,
+                                 String* pattern_string,
+                                 FixedArrayBuilder* builder,
+                                 int* match_pos) {
+  int pos = *match_pos;
+  int subject_length = subject.length();
+  int pattern_length = pattern.length();
+  int max_search_start = subject_length - pattern_length;
+  StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
+  while (pos <= max_search_start) {
+    if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
+      *match_pos = pos;
+      return false;
+    }
+    // Position of end of previous match.
+    int match_end = pos + pattern_length;
+    int new_pos = search.Search(subject, match_end);
+    if (new_pos >= 0) {
+      // A match.
+      if (new_pos > match_end) {
+        ReplacementStringBuilder::AddSubjectSlice(builder,
+            match_end,
+            new_pos);
+      }
+      pos = new_pos;
+      builder->Add(pattern_string);
+    } else {
+      break;
+    }
+  }
+
+  if (pos < max_search_start) {
+    ReplacementStringBuilder::AddSubjectSlice(builder,
+                                              pos + pattern_length,
+                                              subject_length);
+  }
+  *match_pos = pos;
+  return true;
+}
+
+
+
+
 template<typename ResultSeqString>
-MUST_USE_RESULT static MaybeObject* StringReplaceStringWithString(
+MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
     Isolate* isolate,
     Handle<String> subject,
     Handle<JSRegExp> pattern_regexp,
-    Handle<String> replacement) {
+    Handle<String> replacement,
+    Handle<JSArray> last_match_info) {
   ASSERT(subject->IsFlat());
   ASSERT(replacement->IsFlat());
 
@@ -2964,6 +3016,12 @@ MUST_USE_RESULT static MaybeObject* StringReplaceStringWithString(
                         subject_pos,
                         subject_len);
   }
+
+  SetLastMatchInfoNoCaptures(subject,
+                             last_match_info,
+                             indices.at(matches - 1),
+                             indices.at(matches - 1) + pattern_len);
+
   return *result;
 }
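
The rename to StringReplaceAtomRegExpWithString comes with a behavioral
fix: the fast atom path now records last-match info (a single capture, via
SetLastMatchInfoNoCaptures), keeping the legacy RegExp statics consistent
with the generic path. Sketch:

    "banana".replace(/an/g, "X");  // ATOM regexp, global replace
    RegExp.lastMatch;              // "an" -- now set by the fast path too
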
 
@@ -3012,11 +3070,19 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
       compiled_replacement.simple_hint()) {
     if (subject_handle->HasOnlyAsciiChars() &&
         replacement_handle->HasOnlyAsciiChars()) {
-      return StringReplaceStringWithString<SeqAsciiString>(
-          isolate, subject_handle, regexp_handle, replacement_handle);
+      return StringReplaceAtomRegExpWithString<SeqAsciiString>(
+          isolate,
+          subject_handle,
+          regexp_handle,
+          replacement_handle,
+          last_match_info_handle);
     } else {
-      return StringReplaceStringWithString<SeqTwoByteString>(
-          isolate, subject_handle, regexp_handle, replacement_handle);
+      return StringReplaceAtomRegExpWithString<SeqTwoByteString>(
+          isolate,
+          subject_handle,
+          regexp_handle,
+          replacement_handle,
+          last_match_info_handle);
     }
   }
 
@@ -3105,21 +3171,29 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
 
   Handle<String> subject_handle(subject);
   Handle<JSRegExp> regexp_handle(regexp);
+  Handle<JSArray> last_match_info_handle(last_match_info);
 
   // Shortcut for simple non-regexp global replacements
   if (regexp_handle->GetFlags().is_global() &&
       regexp_handle->TypeTag() == JSRegExp::ATOM) {
     Handle<String> empty_string_handle(HEAP->empty_string());
     if (subject_handle->HasOnlyAsciiChars()) {
-      return StringReplaceStringWithString<SeqAsciiString>(
-          isolate, subject_handle, regexp_handle, empty_string_handle);
+      return StringReplaceAtomRegExpWithString<SeqAsciiString>(
+          isolate,
+          subject_handle,
+          regexp_handle,
+          empty_string_handle,
+          last_match_info_handle);
     } else {
-      return StringReplaceStringWithString<SeqTwoByteString>(
-          isolate, subject_handle, regexp_handle, empty_string_handle);
+      return StringReplaceAtomRegExpWithString<SeqTwoByteString>(
+          isolate,
+          subject_handle,
+          regexp_handle,
+          empty_string_handle,
+          last_match_info_handle);
     }
   }
 
-  Handle<JSArray> last_match_info_handle(last_match_info);
   Handle<Object> match = RegExpImpl::Exec(regexp_handle,
                                           subject_handle,
                                           0,
@@ -3139,6 +3213,10 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
     end = RegExpImpl::GetCapture(match_info_array, 1);
   }
 
+  bool global = regexp_handle->GetFlags().is_global();
+
+  if (start == end && !global) return *subject_handle;
+
   int length = subject_handle->length();
   int new_length = length - (end - start);
   if (new_length == 0) {
@@ -3154,7 +3232,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
   }
 
   // If the regexp isn't global, only match once.
-  if (!regexp_handle->GetFlags().is_global()) {
+  if (!global) {
     if (start > 0) {
       String::WriteToFlat(*subject_handle,
                           answer->GetChars(),
@@ -3653,70 +3731,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) {
 }
 
 
-// Two smis before and after the match, for very long strings.
-const int kMaxBuilderEntriesPerRegExpMatch = 5;
-
-
-static void SetLastMatchInfoNoCaptures(Handle<String> subject,
-                                       Handle<JSArray> last_match_info,
-                                       int match_start,
-                                       int match_end) {
-  // Fill last_match_info with a single capture.
-  last_match_info->EnsureSize(2 + RegExpImpl::kLastMatchOverhead);
-  AssertNoAllocation no_gc;
-  FixedArray* elements = FixedArray::cast(last_match_info->elements());
-  RegExpImpl::SetLastCaptureCount(elements, 2);
-  RegExpImpl::SetLastInput(elements, *subject);
-  RegExpImpl::SetLastSubject(elements, *subject);
-  RegExpImpl::SetCapture(elements, 0, match_start);
-  RegExpImpl::SetCapture(elements, 1, match_end);
-}
-
-
-template <typename SubjectChar, typename PatternChar>
-static bool SearchStringMultiple(Isolate* isolate,
-                                 Vector<const SubjectChar> subject,
-                                 Vector<const PatternChar> pattern,
-                                 String* pattern_string,
-                                 FixedArrayBuilder* builder,
-                                 int* match_pos) {
-  int pos = *match_pos;
-  int subject_length = subject.length();
-  int pattern_length = pattern.length();
-  int max_search_start = subject_length - pattern_length;
-  StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
-  while (pos <= max_search_start) {
-    if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
-      *match_pos = pos;
-      return false;
-    }
-    // Position of end of previous match.
-    int match_end = pos + pattern_length;
-    int new_pos = search.Search(subject, match_end);
-    if (new_pos >= 0) {
-      // A match.
-      if (new_pos > match_end) {
-        ReplacementStringBuilder::AddSubjectSlice(builder,
-            match_end,
-            new_pos);
-      }
-      pos = new_pos;
-      builder->Add(pattern_string);
-    } else {
-      break;
-    }
-  }
-
-  if (pos < max_search_start) {
-    ReplacementStringBuilder::AddSubjectSlice(builder,
-                                              pos + pattern_length,
-                                              subject_length);
-  }
-  *match_pos = pos;
-  return true;
-}
-
-
 static bool SearchStringMultiple(Isolate* isolate,
                                  Handle<String> subject,
                                  Handle<String> pattern,
@@ -3856,6 +3870,8 @@ static RegExpImpl::IrregexpResult SearchRegExpNoCaptureMultiple(
 }
 
 
+// Only called from Runtime_RegExpExecMultiple so it doesn't need to maintain
+// separate last match info.  See comment on that function.
 static RegExpImpl::IrregexpResult SearchRegExpMultiple(
     Isolate* isolate,
     Handle<String> subject,
@@ -3884,10 +3900,6 @@ static RegExpImpl::IrregexpResult SearchRegExpMultiple(
   // End of previous match. Differs from pos if match was empty.
   int match_end = 0;
   if (result == RegExpImpl::RE_SUCCESS) {
-    // Need to keep a copy of the previous match for creating last_match_info
-    // at the end, so we have two vectors that we swap between.
-    OffsetsVector registers2(required_registers, isolate);
-    Vector<int> prev_register_vector(registers2.vector(), registers2.length());
     bool first = true;
     do {
       int match_start = register_vector[0];
@@ -3940,11 +3952,6 @@ static RegExpImpl::IrregexpResult SearchRegExpMultiple(
         elements->set(capture_count + 2, *subject);
         builder->Add(*isolate->factory()->NewJSArrayWithElements(elements));
       }
-      // Swap register vectors, so the last successful match is in
-      // prev_register_vector.
-      Vector<int32_t> tmp = prev_register_vector;
-      prev_register_vector = register_vector;
-      register_vector = tmp;
 
       if (match_end > match_start) {
         pos = match_end;
@@ -3976,12 +3983,12 @@ static RegExpImpl::IrregexpResult SearchRegExpMultiple(
       last_match_array->EnsureSize(last_match_array_size);
       AssertNoAllocation no_gc;
       FixedArray* elements = FixedArray::cast(last_match_array->elements());
+      // We have to set this even though the rest of the last match array is
+      // ignored.
       RegExpImpl::SetLastCaptureCount(elements, last_match_capture_count);
+      // These are also read without consulting the override.
       RegExpImpl::SetLastSubject(elements, *subject);
       RegExpImpl::SetLastInput(elements, *subject);
-      for (int i = 0; i < last_match_capture_count; i++) {
-        RegExpImpl::SetCapture(elements, i, prev_register_vector[i]);
-      }
       return RegExpImpl::RE_SUCCESS;
     }
   }
@@ -3990,6 +3997,9 @@ static RegExpImpl::IrregexpResult SearchRegExpMultiple(
 }
 
 
+// This is only called for StringReplaceGlobalRegExpWithFunction.  This sets
+// lastMatchInfoOverride to maintain the last match info, so we don't need to
+// set any other last match array info.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
   ASSERT(args.length() == 4);
   HandleScope handles(isolate);
@@ -4717,6 +4727,36 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
 }
 
 
+// Check whether the debugger is about to step into the callback that is
+// passed to a built-in function such as Array.forEach.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugCallbackSupportsStepping) {
+  if (!isolate->IsDebuggerActive()) return isolate->heap()->false_value();
+  CONVERT_ARG_CHECKED(Object, callback, 0);
+  // We do not step into the callback if it's a builtin or not even a function.
+  if (!callback->IsJSFunction() || JSFunction::cast(callback)->IsBuiltin()) {
+    return isolate->heap()->false_value();
+  }
+  return isolate->heap()->true_value();
+}
+
+
+// Set one-shot breakpoints for the callback function that is passed to a
+// built-in function such as Array.forEach to enable stepping into the callback.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrepareStepInIfStepping) {
+  Debug* debug = isolate->debug();
+  if (!debug->IsStepping()) return NULL;
+  CONVERT_ARG_CHECKED(Object, callback, 0);
+  HandleScope scope(isolate);
+  Handle<SharedFunctionInfo> shared_info(JSFunction::cast(callback)->shared());
+  // When leaving the callback, step-out has been activated but is not
+  // performed unless we also leave the builtin.  To be able to step into
+  // the callback again, we need to clear the step-out at this point.
+  debug->ClearStepOut();
+  debug->FloodWithOneShot(shared_info);
+  return NULL;
+}
+
+
 // Set a local property, even if it is READ_ONLY.  If the property does not
 // exist, it will be added with attributes NONE.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
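
These two entries let self-hosted builtins cooperate with the debugger:
the first cheaply asks whether stepping support is needed at all, and the
second floods the callback with one-shot breakpoints. A hypothetical
builtin-side usage (the actual call sites in array.js are not part of this
excerpt; %-syntax requires --allow-natives-syntax):

    function SketchedForEach(array, f, receiver) {
      var stepping = %DebugCallbackSupportsStepping(f);
      for (var i = 0; i < array.length; i++) {
        if (i in array) {
          // Re-arm the one-shot breakpoints before every invocation.
          if (stepping) %DebugPrepareStepInIfStepping(f);
          f.call(receiver, array[i], i, array);
        }
      }
    }
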
@@ -7135,29 +7175,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringEquals) {
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_UserObjectEquals) {
-  NoHandleAllocation ha;
-  ASSERT(args.length() == 2);
-
-  CONVERT_ARG_CHECKED(JSObject, lhs, 1);
-  CONVERT_ARG_CHECKED(JSObject, rhs, 0);
-
-  bool result;
-
-  v8::UserObjectComparisonCallback callback = isolate->UserObjectComparisonCallback();
-  if (callback) {
-      HandleScope scope(isolate);
-      Handle<JSObject> lhs_handle(lhs);
-      Handle<JSObject> rhs_handle(rhs);
-      result = callback(v8::Utils::ToLocal(lhs_handle), v8::Utils::ToLocal(rhs_handle));
-  } else {
-      result = (lhs == rhs);
-  }
-
-  return Smi::FromInt(result?0:1);
-}
-
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
@@ -8163,6 +8180,14 @@ static void MaterializeArgumentsObjectInFrame(Isolate* isolate,
         ASSERT(*arguments != isolate->heap()->undefined_value());
       }
       frame->SetExpression(i, *arguments);
+      if (FLAG_trace_deopt) {
+        PrintF("Materializing arguments object for frame %p - %p: %p ",
+               reinterpret_cast<void*>(frame->sp()),
+               reinterpret_cast<void*>(frame->fp()),
+               reinterpret_cast<void*>(*arguments));
+        arguments->ShortPrint();
+        PrintF("\n");
+      }
     }
   }
 }
@@ -8252,6 +8277,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeoptimizeFunction) {
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearFunctionTypeFeedback) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+  Code* unoptimized = function->shared()->code();
+  if (unoptimized->kind() == Code::FUNCTION) {
+    unoptimized->ClearInlineCaches();
+    unoptimized->ClearTypeFeedbackCells(isolate->heap());
+  }
+  return isolate->heap()->undefined_value();
+}
+
+
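
ClearFunctionTypeFeedback resets the inline caches and type-feedback cells
of a function's unoptimized code, which is mainly useful for tests that
need a function back in a pristine state. A hedged d8 sketch (requires
--allow-natives-syntax; add() is illustrative):

    function add(a, b) { return a + b; }
    add(1, 2);                        // collects Smi feedback in the ICs
    %ClearFunctionTypeFeedback(add);  // ICs and feedback cells cleared
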
 RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) {
 #if defined(USE_SIMULATOR)
   return isolate->heap()->true_value();
@@ -8291,10 +8329,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
   if (!V8::UseCrankshaft()) {
     return Smi::FromInt(4);  // 4 == "never".
   }
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   if (FLAG_always_opt) {
-    return Smi::FromInt(3);  // 3 == "always".
+    // --always-opt is best-effort rather than a real promise, so we still
+    // say "no" if the function is not actually optimized.
+    return function->IsOptimized() ? Smi::FromInt(3)   // 3 == "always".
+                                   : Smi::FromInt(2);  // 2 == "no".
   }
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   return function->IsOptimized() ? Smi::FromInt(1)   // 1 == "yes".
                                  : Smi::FromInt(2);  // 2 == "no".
 }
@@ -8435,6 +8476,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckIsBootstrapping) {
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetRootNaN) {
+  RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
+  return isolate->heap()->nan_value();
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Call) {
   HandleScope scope(isolate);
   ASSERT(args.length() >= 2);
@@ -8527,10 +8574,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
   ASSERT(args.length() == 1);
 
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
-  SharedFunctionInfo* shared = function->shared();
-  // TODO: The QML mode should be checked in the ContextLength function.
-  int length = shared->scope_info()->ContextLength(shared->qml_mode());
-
+  int length = function->shared()->scope_info()->ContextLength();
   Object* result;
   { MaybeObject* maybe_result =
         isolate->heap()->AllocateFunctionContext(length, function);
@@ -8637,6 +8681,25 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PushModuleContext) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(ScopeInfo, scope_info, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSModule, instance, 1);
+
+  Context* context;
+  MaybeObject* maybe_context =
+      isolate->heap()->AllocateModuleContext(isolate->context(),
+                                             scope_info);
+  if (!maybe_context->To(&context)) return maybe_context;
+  // Also initialize the context slot of the instance object.
+  instance->set_context(context);
+  isolate->set_context(context);
+
+  return context;
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteContextSlot) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
@@ -9190,7 +9253,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
 
   // Compile source string in the global context.
   Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
-      source, context, true, CLASSIC_MODE, RelocInfo::kNoPosition, false);
+      source, context, true, CLASSIC_MODE, RelocInfo::kNoPosition);
   if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> fun =
       isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
@@ -9204,8 +9267,7 @@ static ObjectPair CompileGlobalEval(Isolate* isolate,
                                     Handle<String> source,
                                     Handle<Object> receiver,
                                     LanguageMode language_mode,
-                                    int scope_position,
-                                    bool qml_mode) {
+                                    int scope_position) {
   Handle<Context> context = Handle<Context>(isolate->context());
   Handle<Context> global_context = Handle<Context>(context->global_context());
 
@@ -9225,8 +9287,7 @@ static ObjectPair CompileGlobalEval(Isolate* isolate,
       Handle<Context>(isolate->context()),
       context->IsGlobalContext(),
       language_mode,
-      scope_position,
-      qml_mode);
+      scope_position);
   if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
   Handle<JSFunction> compiled =
       isolate->factory()->NewFunctionFromSharedFunctionInfo(
@@ -9236,7 +9297,7 @@ static ObjectPair CompileGlobalEval(Isolate* isolate,
 
 
 RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
-  ASSERT(args.length() == 6);
+  ASSERT(args.length() == 5);
 
   HandleScope scope(isolate);
   Handle<Object> callee = args.at<Object>(0);
@@ -9257,8 +9318,7 @@ RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
                            args.at<String>(1),
                            args.at<Object>(2),
                            language_mode,
-                           args.smi_at(4),
-                           Smi::cast(args[5])->value());
+                           args.smi_at(4));
 }
 
 
@@ -9966,36 +10026,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_EstimateNumberOfElements) {
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SwapElements) {
-  HandleScope handle_scope(isolate);
-
-  ASSERT_EQ(3, args.length());
-
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
-  Handle<Object> key1 = args.at<Object>(1);
-  Handle<Object> key2 = args.at<Object>(2);
-
-  uint32_t index1, index2;
-  if (!key1->ToArrayIndex(&index1)
-      || !key2->ToArrayIndex(&index2)) {
-    return isolate->ThrowIllegalOperation();
-  }
-
-  Handle<JSObject> jsobject = Handle<JSObject>::cast(object);
-  Handle<Object> tmp1 = Object::GetElement(jsobject, index1);
-  RETURN_IF_EMPTY_HANDLE(isolate, tmp1);
-  Handle<Object> tmp2 = Object::GetElement(jsobject, index2);
-  RETURN_IF_EMPTY_HANDLE(isolate, tmp2);
-
-  RETURN_IF_EMPTY_HANDLE(
-      isolate, JSObject::SetElement(jsobject, index1, tmp2, NONE, kStrictMode));
-  RETURN_IF_EMPTY_HANDLE(
-      isolate, JSObject::SetElement(jsobject, index2, tmp1, NONE, kStrictMode));
-
-  return isolate->heap()->undefined_value();
-}
-
-
 // Returns an array that tells you where in the [0, length) interval an array
 // might have elements.  Can either return keys (positive integers) or
 // intervals (pair of a negative integer (-start-1) followed by a
@@ -10971,10 +11001,10 @@ static Handle<JSObject> MaterializeModuleScope(
 }
 
 
-// Iterate over the actual scopes visible from a stack frame. The iteration
-// proceeds from the innermost visible nested scope outwards. All scopes are
-// backed by an actual context except the local scope, which is inserted
-// "artificially" in the context chain.
+// Iterate over the actual scopes visible from a stack frame or from a closure.
+// The iteration proceeds from the innermost visible nested scope outwards.
+// All scopes are backed by an actual context except the local scope,
+// which is inserted "artificially" in the context chain.
 class ScopeIterator {
  public:
   enum ScopeType {
@@ -11075,6 +11105,18 @@ class ScopeIterator {
     }
   }
 
+  ScopeIterator(Isolate* isolate,
+                Handle<JSFunction> function)
+    : isolate_(isolate),
+      frame_(NULL),
+      inlined_jsframe_index_(0),
+      function_(function),
+      context_(function->context()) {
+    if (function->IsBuiltin()) {
+      context_ = Handle<Context>();
+    }
+  }
+
   // More scopes?
   bool Done() { return context_.is_null(); }
 
@@ -11295,6 +11337,22 @@ static const int kScopeDetailsTypeIndex = 0;
 static const int kScopeDetailsObjectIndex = 1;
 static const int kScopeDetailsSize = 2;
 
+
+static MaybeObject* MaterializeScopeDetails(Isolate* isolate,
+    ScopeIterator* it) {
+  // Calculate the size of the result.
+  int details_size = kScopeDetailsSize;
+  Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
+
+  // Fill in scope details.
+  details->set(kScopeDetailsTypeIndex, Smi::FromInt(it->Type()));
+  Handle<JSObject> scope_object = it->ScopeObject();
+  RETURN_IF_EMPTY_HANDLE(isolate, scope_object);
+  details->set(kScopeDetailsObjectIndex, *scope_object);
+
+  return *isolate->factory()->NewJSArrayWithElements(details);
+}
+
 // Return an array with scope details
 // args[0]: number: break id
 // args[1]: number: frame index
@@ -11332,18 +11390,46 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeDetails) {
   if (it.Done()) {
     return isolate->heap()->undefined_value();
   }
+  return MaterializeScopeDetails(isolate, &it);
+}
 
-  // Calculate the size of the result.
-  int details_size = kScopeDetailsSize;
-  Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
 
-  // Fill in scope details.
-  details->set(kScopeDetailsTypeIndex, Smi::FromInt(it.Type()));
-  Handle<JSObject> scope_object = it.ScopeObject();
-  RETURN_IF_EMPTY_HANDLE(isolate, scope_object);
-  details->set(kScopeDetailsObjectIndex, *scope_object);
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionScopeCount) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
 
-  return *isolate->factory()->NewJSArrayWithElements(details);
+  // Check arguments.
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+
+  // Count the visible scopes.
+  int n = 0;
+  for (ScopeIterator it(isolate, fun); !it.Done(); it.Next()) {
+    n++;
+  }
+
+  return Smi::FromInt(n);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionScopeDetails) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 2);
+
+  // Check arguments.
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+  CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
+
+  // Find the requested scope.
+  int n = 0;
+  ScopeIterator it(isolate, fun);
+  for (; !it.Done() && n < index; it.Next()) {
+    n++;
+  }
+  if (it.Done()) {
+    return isolate->heap()->undefined_value();
+  }
+
+  return MaterializeScopeDetails(isolate, &it);
 }
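
Together with the new closure-taking ScopeIterator constructor above,
these entries expose scope inspection for a function that has no frame on
the stack. A hedged d8 sketch (--allow-natives-syntax; the exact scope
count depends on the function):

    function makeCounter() { var n = 0; return function() { return ++n; }; }
    var inc = makeCounter();
    %GetFunctionScopeCount(inc);       // scopes visible from the closure
    %GetFunctionScopeDetails(inc, 0);  // [scope type, scope object] pair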
 
 
@@ -11475,12 +11561,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetBreakLocations) {
 }
 
 
-// Return the value of breakpoint_relocation flag
-RUNTIME_FUNCTION(MaybeObject*, Runtime_AllowBreakPointRelocation) {
-  return Smi::FromInt(FLAG_breakpoint_relocation);
-}
-
-
 // Set a break point in a function
 // args[0]: function
 // args[1]: number: break source position (within the function source)
@@ -11875,7 +11955,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
   FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
   Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
   Handle<ScopeInfo> scope_info(function->shared()->scope_info());
-  bool qml_mode = function->shared()->qml_mode();
 
   // Traverse the saved contexts chain to find the active context for the
   // selected frame.
@@ -11947,8 +12026,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
                             context,
                             context->IsGlobalContext(),
                             CLASSIC_MODE,
-                            RelocInfo::kNoPosition,
-                            qml_mode);
+                            RelocInfo::kNoPosition);
   if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> compiled_function =
       isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context);
@@ -11958,8 +12036,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
   Handle<Object> receiver(frame->receiver(), isolate);
   Handle<Object> evaluation_function =
       Execution::Call(compiled_function, receiver, 0, NULL,
-                      &has_pending_exception, false,
-                      Handle<Object>(function->context()->qml_global()));
+                      &has_pending_exception);
   if (has_pending_exception) return Failure::Exception();
 
   Handle<Object> arguments = GetArgumentsObject(isolate,
@@ -12042,8 +12119,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
                             context,
                             is_global,
                             CLASSIC_MODE,
-                            RelocInfo::kNoPosition,
-                            false);
+                            RelocInfo::kNoPosition);
   if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> compiled_function =
       Handle<JSFunction>(
index 22cb12d..83991bb 100644 (file)
@@ -77,6 +77,7 @@ namespace internal {
   \
   /* Utilities */ \
   F(CheckIsBootstrapping, 0, 1) \
+  F(GetRootNaN, 0, 1) \
   F(Call, -1 /* >= 2 */, 1) \
   F(Apply, 5, 1) \
   F(GetFunctionDelegate, 1, 1) \
@@ -88,6 +89,7 @@ namespace internal {
   F(NotifyDeoptimized, 1, 1) \
   F(NotifyOSR, 0, 1) \
   F(DeoptimizeFunction, 1, 1) \
+  F(ClearFunctionTypeFeedback, 1, 1) \
   F(RunningInSimulator, 0, 1) \
   F(OptimizeFunctionOnNextCall, -1, 1) \
   F(GetOptimizationStatus, 1, 1) \
@@ -97,6 +99,8 @@ namespace internal {
   F(AllocateInNewSpace, 1, 1) \
   F(SetNativeFlag, 1, 1) \
   F(StoreArrayLiteralElement, 5, 1) \
+  F(DebugCallbackSupportsStepping, 1, 1) \
+  F(DebugPrepareStepInIfStepping, 1, 1) \
   \
   /* Array join support */ \
   F(PushIfAbsent, 2, 1) \
@@ -156,7 +160,6 @@ namespace internal {
   /* Comparisons */ \
   F(NumberEquals, 2, 1) \
   F(StringEquals, 2, 1) \
-  F(UserObjectEquals, 2, 1) \
   \
   F(NumberCompare, 3, 1) \
   F(SmiLexicographicCompare, 2, 1) \
@@ -257,7 +260,7 @@ namespace internal {
   \
   /* Eval */ \
   F(GlobalReceiver, 1, 1) \
-  F(ResolvePossiblyDirectEval, 6, 2) \
+  F(ResolvePossiblyDirectEval, 5, 2) \
   \
   F(SetProperty, -1 /* 4 or 5 */, 1) \
   F(DefineOrRedefineDataProperty, 4, 1) \
@@ -269,7 +272,6 @@ namespace internal {
   F(GetArrayKeys, 2, 1) \
   F(MoveArrayContents, 2, 1) \
   F(EstimateNumberOfElements, 1, 1) \
-  F(SwapElements, 3, 1) \
   \
   /* Getters and Setters */ \
   F(LookupAccessor, 3, 1) \
@@ -324,6 +326,7 @@ namespace internal {
   F(PushWithContext, 2, 1) \
   F(PushCatchContext, 3, 1) \
   F(PushBlockContext, 2, 1) \
+  F(PushModuleContext, 2, 1) \
   F(DeleteContextSlot, 2, 1) \
   F(LoadContextSlot, 2, 2) \
   F(LoadContextSlotNoReferenceError, 2, 2) \
@@ -332,8 +335,8 @@ namespace internal {
   /* Declarations and initialization */ \
   F(DeclareGlobals, 3, 1) \
   F(DeclareContextSlot, 4, 1) \
-  F(InitializeVarGlobal, -1 /* 3 or 4 */, 1) \
-  F(InitializeConstGlobal, 3, 1) \
+  F(InitializeVarGlobal, -1 /* 2 or 3 */, 1) \
+  F(InitializeConstGlobal, 2, 1) \
   F(InitializeConstContextSlot, 3, 1) \
   F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
   \
@@ -401,12 +404,13 @@ namespace internal {
   F(GetFrameDetails, 2, 1) \
   F(GetScopeCount, 2, 1) \
   F(GetScopeDetails, 4, 1) \
+  F(GetFunctionScopeCount, 1, 1) \
+  F(GetFunctionScopeDetails, 2, 1) \
   F(DebugPrintScopes, 0, 1) \
   F(GetThreadCount, 1, 1) \
   F(GetThreadDetails, 2, 1) \
   F(SetDisableBreak, 1, 1) \
   F(GetBreakLocations, 1, 1) \
-  F(AllowBreakPointRelocation, 0, 1) \
   F(SetFunctionBreakPoint, 3, 1) \
   F(SetScriptBreakPoint, 3, 1) \
   F(ClearBreakPoint, 1, 1) \
@@ -531,8 +535,7 @@ namespace internal {
   F(RegExpExec, 4, 1)                                                        \
   F(RegExpConstructResult, 3, 1)                                             \
   F(GetFromCache, 2, 1)                                                      \
-  F(NumberToString, 1, 1)                                                    \
-  F(SwapElements, 3, 1)
+  F(NumberToString, 1, 1)
 
 
 //---------------------------------------------------------------------------
index 53d9a39..6b48734 100644 (file)
@@ -47,7 +47,7 @@ var $String = global.String;
 var $Number = global.Number;
 var $Function = global.Function;
 var $Boolean = global.Boolean;
-var $NaN = 0/0;
+var $NaN = %GetRootNaN();
 var builtins = this;
 
 // ECMA-262 Section 11.9.3.
index 7901b5d..f24af2e 100755 (executable)
@@ -611,7 +611,7 @@ void Scanner::SeekForward(int pos) {
 }
 
 
-void Scanner::ScanEscape() {
+bool Scanner::ScanEscape() {
   uc32 c = c0_;
   Advance();
 
@@ -621,7 +621,7 @@ void Scanner::ScanEscape() {
     if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance();
     // Allow LF+CR newlines in multiline string literals.
     if (IsLineFeed(c) && IsCarriageReturn(c0_)) Advance();
-    return;
+    return true;
   }
 
   switch (c) {
@@ -635,13 +635,13 @@ void Scanner::ScanEscape() {
     case 't' : c = '\t'; break;
     case 'u' : {
       c = ScanHexNumber(4);
-      if (c < 0) c = 'u';
+      if (c < 0) return false;
       break;
     }
     case 'v' : c = '\v'; break;
     case 'x' : {
       c = ScanHexNumber(2);
-      if (c < 0) c = 'x';
+      if (c < 0) return false;
       break;
     }
     case '0' :  // fall through
@@ -654,10 +654,11 @@ void Scanner::ScanEscape() {
     case '7' : c = ScanOctalEscape(c, 2); break;
   }
 
-  // According to ECMA-262, 3rd, 7.8.4 (p 18ff) these
-  // should be illegal, but they are commonly handled
-  // as non-escaped characters by JS VMs.
+  // According to ECMA-262, section 7.8.4, characters not covered by the
+  // above cases should be illegal, but they are commonly handled as
+  // non-escaped characters by JS VMs.
   AddLiteralChar(c);
+  return true;
 }
 
 
@@ -696,8 +697,7 @@ Token::Value Scanner::ScanString() {
     uc32 c = c0_;
     Advance();
     if (c == '\\') {
-      if (c0_ < 0) return Token::ILLEGAL;
-      ScanEscape();
+      if (c0_ < 0 || !ScanEscape()) return Token::ILLEGAL;
     } else {
       AddLiteralChar(c);
     }
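
Because ScanEscape now reports failure, a malformed \x or \u escape makes
the whole string literal illegal instead of silently degrading to the
letter 'x' or 'u'. The observable change, sketched:

    eval('"\\x1G"');   // before: the string "x1G"; now: SyntaxError
    eval('"\\u123"');  // before: the string "u123"; now: SyntaxError
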
index 045e7d2..4de413b 100644 (file)
@@ -520,13 +520,16 @@ class Scanner {
   Token::Value ScanIdentifierOrKeyword();
   Token::Value ScanIdentifierSuffix(LiteralScope* literal);
 
-  void ScanEscape();
   Token::Value ScanString();
 
-  // Decodes a unicode escape-sequence which is part of an identifier.
+  // Scans an escape-sequence which is part of a string and adds the
+  // decoded character to the current literal. Returns true if the
+  // escape sequence was scanned successfully and false if it was invalid.
+  bool ScanEscape();
+  // Decodes a Unicode escape-sequence which is part of an identifier.
   // If the escape sequence cannot be decoded the result is kBadChar.
   uc32 ScanIdentifierUnicodeEscape();
-  // Recognizes a uniocde escape-sequence and adds its characters,
+  // Scans a Unicode escape-sequence and adds its characters,
   // uninterpreted, to the current literal. Used for parsing RegExp
   // flags.
   bool ScanLiteralUnicodeEscape();
index 3150045..f50af30 100644 (file)
@@ -53,7 +53,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Scope* scope) {
   FunctionVariableInfo function_name_info;
   VariableMode function_variable_mode;
   if (scope->is_function_scope() && scope->function() != NULL) {
-    Variable* var = scope->function()->var();
+    Variable* var = scope->function()->proxy()->var();
     if (!var->is_used()) {
       function_name_info = UNUSED;
     } else if (var->IsContextSlot()) {
@@ -80,7 +80,6 @@ Handle<ScopeInfo> ScopeInfo::Create(Scope* scope) {
   int flags = TypeField::encode(scope->type()) |
       CallsEvalField::encode(scope->calls_eval()) |
       LanguageModeField::encode(scope->language_mode()) |
-      QmlModeField::encode(scope->is_qml_mode()) |
       FunctionVariableField::encode(function_name_info) |
       FunctionVariableMode::encode(function_variable_mode);
   scope_info->SetFlags(flags);
@@ -130,8 +129,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Scope* scope) {
   // If present, add the function variable name and its index.
   ASSERT(index == scope_info->FunctionNameEntryIndex());
   if (has_function_name) {
-    int var_index = scope->function()->var()->index();
-    scope_info->set(index++, *scope->function()->name());
+    int var_index = scope->function()->proxy()->var()->index();
+    scope_info->set(index++, *scope->function()->proxy()->name());
     scope_info->set(index++, Smi::FromInt(var_index));
     ASSERT(function_name_info != STACK ||
            (var_index == scope_info->StackLocalCount() &&
@@ -143,7 +142,9 @@ Handle<ScopeInfo> ScopeInfo::Create(Scope* scope) {
   ASSERT(index == scope_info->length());
   ASSERT(scope->num_parameters() == scope_info->ParameterCount());
   ASSERT(scope->num_stack_slots() == scope_info->StackSlotCount());
-  ASSERT(scope->num_heap_slots() == scope_info->ContextLength());
+  ASSERT(scope->num_heap_slots() == scope_info->ContextLength() ||
+         (scope->num_heap_slots() == kVariablePartIndex &&
+          scope_info->ContextLength() == 0));
   return scope_info;
 }
 
@@ -169,11 +170,6 @@ LanguageMode ScopeInfo::language_mode() {
 }
 
 
-bool ScopeInfo::IsQmlMode() {
-  return length() > 0 && QmlModeField::decode(Flags());
-}
-
-
 int ScopeInfo::LocalCount() {
   return StackLocalCount() + ContextLocalCount();
 }
@@ -189,7 +185,7 @@ int ScopeInfo::StackSlotCount() {
 }
 
 
-int ScopeInfo::ContextLength(bool qml_function) {
+int ScopeInfo::ContextLength() {
   if (length() > 0) {
     int context_locals = ContextLocalCount();
     bool function_name_context_slot =
@@ -198,9 +194,7 @@ int ScopeInfo::ContextLength(bool qml_function) {
         function_name_context_slot ||
         Type() == WITH_SCOPE ||
         (Type() == FUNCTION_SCOPE && CallsEval());
-
-    // TODO: The QML mode should be checked in the has_context expression.
-    if (has_context || qml_function) {
+    if (has_context) {
       return Context::MIN_CONTEXT_SLOTS + context_locals +
           (function_name_context_slot ? 1 : 0);
     }
index 1f18e51..2c61a75 100644 (file)
@@ -36,8 +36,6 @@
 
 #include "allocation-inl.h"
 
-#include "debug.h"
-
 namespace v8 {
 namespace internal {
 
@@ -186,8 +184,6 @@ void Scope::SetDefaults(ScopeType type,
   // Inherit the strict mode from the parent scope.
   language_mode_ = (outer_scope != NULL)
       ? outer_scope->language_mode_ : CLASSIC_MODE;
-  qml_mode_flag_ = (outer_scope != NULL)
-      ? outer_scope->qml_mode_flag_ : kNonQmlMode;
   outer_scope_calls_non_strict_eval_ = false;
   inner_scope_calls_eval_ = false;
   force_eager_compilation_ = false;
@@ -392,14 +388,17 @@ Variable* Scope::LocalLookup(Handle<String> name) {
 
   // Check context slot lookup.
   VariableMode mode;
+  Variable::Location location = Variable::CONTEXT;
   InitializationFlag init_flag;
   int index = scope_info_->ContextSlotIndex(*name, &mode, &init_flag);
   if (index < 0) {
     // Check parameters.
-    mode = VAR;
-    init_flag = kCreatedInitialized;
     index = scope_info_->ParameterIndex(*name);
     if (index < 0) return NULL;
+
+    mode = DYNAMIC;
+    location = Variable::LOOKUP;
+    init_flag = kCreatedInitialized;
   }
 
   Variable* var =
@@ -409,21 +408,27 @@ Variable* Scope::LocalLookup(Handle<String> name) {
                          true,
                          Variable::NORMAL,
                          init_flag);
-  var->AllocateTo(Variable::CONTEXT, index);
+  var->AllocateTo(location, index);
   return var;
 }
 
 
 Variable* Scope::LookupFunctionVar(Handle<String> name,
                                    AstNodeFactory<AstNullVisitor>* factory) {
-  if (function_ != NULL && function_->name().is_identical_to(name)) {
-    return function_->var();
+  if (function_ != NULL && function_->proxy()->name().is_identical_to(name)) {
+    return function_->proxy()->var();
   } else if (!scope_info_.is_null()) {
     // If we are backed by a scope info, try to look up the variable there.
     VariableMode mode;
     int index = scope_info_->FunctionContextSlotIndex(*name, &mode);
     if (index < 0) return NULL;
-    Variable* var = DeclareFunctionVar(name, mode, factory);
+    Variable* var = new Variable(
+        this, name, mode, true /* is valid LHS */,
+        Variable::NORMAL, kCreatedInitialized);
+    VariableProxy* proxy = factory->NewVariableProxy(var);
+    VariableDeclaration* declaration =
+        factory->NewVariableDeclaration(proxy, mode, this);
+    DeclareFunctionVar(declaration);
     var->AllocateTo(Variable::CONTEXT, index);
     return var;
   } else {
@@ -653,6 +658,26 @@ bool Scope::HasTrivialOuterContext() const {
 }
 
 
+bool Scope::AllowsLazyRecompilation() const {
+  return !force_eager_compilation_ &&
+         !TrivialDeclarationScopesBeforeWithScope();
+}
+
+
+bool Scope::TrivialDeclarationScopesBeforeWithScope() const {
+  Scope* outer = outer_scope_;
+  if (outer == NULL) return false;
+  outer = outer->DeclarationScope();
+  while (outer != NULL) {
+    if (outer->is_with_scope()) return true;
+    if (outer->is_declaration_scope() && outer->num_heap_slots() > 0)
+      return false;
+    outer = outer->outer_scope_;
+  }
+  return false;
+}
+
+
 int Scope::ContextChainLength(Scope* scope) {
   int n = 0;
   for (Scope* s = this; s != scope; s = s->outer_scope_) {
@@ -795,7 +820,7 @@ void Scope::Print(int n) {
   // Function name, if any (named function literals, only).
   if (function_ != NULL) {
     Indent(n1, "// (local) function name: ");
-    PrintName(function_->name());
+    PrintName(function_->proxy()->name());
     PrintF("\n");
   }
 
@@ -828,7 +853,7 @@ void Scope::Print(int n) {
   // Print locals.
   Indent(n1, "// function var\n");
   if (function_ != NULL) {
-    PrintVar(n1, function_->var());
+    PrintVar(n1, function_->proxy()->var());
   }
 
   Indent(n1, "// temporary vars\n");
@@ -953,30 +978,14 @@ bool Scope::ResolveVariable(CompilationInfo* info,
       break;
 
     case BOUND_EVAL_SHADOWED:
-      // We found a variable variable binding that might be shadowed
-      // by 'eval' introduced variable bindings.
+      // We either found a variable binding that might be shadowed by eval, or
+      // we gave up on it (e.g. by encountering a local with the same name in
+      // the outer scope which was not promoted to a context; this can happen
+      // when using the debugger to evaluate arbitrary expressions at a break
+      // point).
       if (var->is_global()) {
         var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
-
-        if (is_qml_mode()) {
-          Handle<GlobalObject> global = isolate_->global();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-          if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
-            // Get the context before the debugger was entered.
-            SaveContext *save = isolate_->save_context();
-            while (save != NULL && *save->context() == *isolate_->debug()->debug_context())
-              save = save->prev();
-
-            global = Handle<GlobalObject>(save->context()->global());
-          }
-#endif
-
-          if (is_qml_mode() && !global->HasProperty(*(proxy->name()))) {
-            var->set_is_qml_global(true);
-          }
-        }
-
+      } else if (var->is_dynamic()) {
+        var = NonLocal(proxy->name(), DYNAMIC);
       } else {
         Variable* invalidated = var;
         var = NonLocal(proxy->name(), DYNAMIC_LOCAL);
@@ -987,52 +996,12 @@ bool Scope::ResolveVariable(CompilationInfo* info,
     case UNBOUND:
       // No binding has been found. Declare a variable in global scope.
       var = info->global_scope()->DeclareGlobal(proxy->name());
-
-      if (is_qml_mode()) {
-        Handle<GlobalObject> global = isolate_->global();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-        if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
-          // Get the context before the debugger was entered.
-          SaveContext *save = isolate_->save_context();
-          while (save != NULL && *save->context() == *isolate_->debug()->debug_context())
-            save = save->prev();
-
-          global = Handle<GlobalObject>(save->context()->global());
-        }
-#endif
-
-        if (!global->HasProperty(*(proxy->name()))) {
-          var->set_is_qml_global(true);
-        }
-      }
-
       break;
 
     case UNBOUND_EVAL_SHADOWED:
       // No binding has been found. But some scope makes a
       // non-strict 'eval' call.
       var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
-
-      if (is_qml_mode()) {
-        Handle<GlobalObject> global = isolate_->global();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-        if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
-          // Get the context before the debugger was entered.
-          SaveContext *save = isolate_->save_context();
-          while (save != NULL && *save->context() == *isolate_->debug()->debug_context())
-            save = save->prev();
-
-          global = Handle<GlobalObject>(save->context()->global());
-        }
-#endif
-
-        if (is_qml_mode() && !global->HasProperty(*(proxy->name()))) {
-          var->set_is_qml_global(true);
-        }
-      }
-
       break;
 
     case DYNAMIC_LOOKUP:
@@ -1150,7 +1119,7 @@ bool Scope::MustAllocateInContext(Variable* var) {
   // Exceptions: temporary variables are never allocated in a context;
   // catch-bound variables are always allocated in a context.
   if (var->mode() == TEMPORARY) return false;
-  if (is_catch_scope() || is_block_scope()) return true;
+  if (is_catch_scope() || is_block_scope() || is_module_scope()) return true;
   return var->has_forced_context_allocation() ||
       scope_calls_eval_ ||
       inner_scope_calls_eval_ ||
@@ -1268,7 +1237,7 @@ void Scope::AllocateNonParameterLocals() {
   // because of the current ScopeInfo implementation (see
   // ScopeInfo::ScopeInfo(FunctionScope* scope) constructor).
   if (function_ != NULL) {
-    AllocateNonParameterLocal(function_->var());
+    AllocateNonParameterLocal(function_->proxy()->var());
   }
 }
 
@@ -1294,7 +1263,8 @@ void Scope::AllocateVariablesRecursively() {
   // Force allocation of a context for this scope if necessary. For a 'with'
   // scope and for a function scope that makes an 'eval' call we need a context,
   // even if no local variables were statically allocated in the scope.
-  bool must_have_context = is_with_scope() ||
+  // Likewise for modules.
+  bool must_have_context = is_with_scope() || is_module_scope() ||
       (is_function_scope() && calls_eval());
 
   // If we didn't allocate any locals in the local context, then we only
@@ -1310,14 +1280,14 @@ void Scope::AllocateVariablesRecursively() {
 
 int Scope::StackLocalCount() const {
   return num_stack_slots() -
-      (function_ != NULL && function_->var()->IsStackLocal() ? 1 : 0);
+      (function_ != NULL && function_->proxy()->var()->IsStackLocal() ? 1 : 0);
 }
 
 
 int Scope::ContextLocalCount() const {
   if (num_heap_slots() == 0) return 0;
   return num_heap_slots() - Context::MIN_CONTEXT_SLOTS -
-      (function_ != NULL && function_->var()->IsContextSlot() ? 1 : 0);
+      (function_ != NULL && function_->proxy()->var()->IsContextSlot() ? 1 : 0);
 }
 
 } }  // namespace v8::internal
index 2a3b4d3..be6705b 100644 (file)
@@ -126,15 +126,9 @@ class Scope: public ZoneObject {
   // Declare the function variable for a function literal. This variable
   // is in an intermediate scope between this function scope and the
   // outer scope. Only possible for function scopes; at most one variable.
-  template<class Visitor>
-  Variable* DeclareFunctionVar(Handle<String> name,
-                               VariableMode mode,
-                               AstNodeFactory<Visitor>* factory) {
-    ASSERT(is_function_scope() && function_ == NULL);
-    Variable* function_var = new Variable(
-        this, name, mode, true, Variable::NORMAL, kCreatedInitialized);
-    function_ = factory->NewVariableProxy(function_var);
-    return function_var;
+  void DeclareFunctionVar(VariableDeclaration* declaration) {
+    ASSERT(is_function_scope());
+    function_ = declaration;
   }
 
   // Declare a parameter in this scope.  When there are duplicated
@@ -230,11 +224,6 @@ class Scope: public ZoneObject {
     language_mode_ = language_mode;
   }
 
-  // Enable qml mode for this scope
-  void EnableQmlModeFlag() {
-    qml_mode_flag_ = kQmlMode;
-  }
-
   // Position in the source where this scope begins and ends.
   //
   // * For the scope of a with statement
@@ -289,7 +278,6 @@ class Scope: public ZoneObject {
   bool is_strict_or_extended_eval_scope() const {
     return is_eval_scope() && !is_classic_mode();
   }
-  bool is_qml_mode() const { return qml_mode_flag() == kQmlMode; }
 
   // Information about which scopes calls eval.
   bool calls_eval() const { return scope_calls_eval_; }
@@ -314,16 +302,12 @@ class Scope: public ZoneObject {
   // The language mode of this scope.
   LanguageMode language_mode() const { return language_mode_; }
 
-  // The strict mode of this scope.
-  QmlModeFlag qml_mode_flag() const { return qml_mode_flag_; }
-
   // The variable corresponding the 'this' value.
   Variable* receiver() { return receiver_; }
 
   // The variable holding the function literal for named function
-  // literals, or NULL.
-  // Only valid for function scopes.
-  VariableProxy* function() const {
+  // literals, or NULL.  Only valid for function scopes.
+  VariableDeclaration* function() const {
     ASSERT(is_function_scope());
     return function_;
   }
@@ -378,13 +362,16 @@ class Scope: public ZoneObject {
   bool AllowsLazyCompilation() const;
 
   // True if we can lazily recompile functions with this scope.
-  bool allows_lazy_recompilation() const {
-    return !force_eager_compilation_;
-  }
+  bool AllowsLazyRecompilation() const;
 
   // True if the outer context of this scope is always the global context.
   bool HasTrivialOuterContext() const;
 
+  // True if this scope is inside a with scope and all declaration scopes
+  // between them have empty contexts. Such declaration scopes become
+  // invisible during scope info deserialization.
+  bool TrivialDeclarationScopesBeforeWithScope() const;
+
   // The number of contexts between this and scope; zero if this == scope.
   int ContextChainLength(Scope* scope);
 
@@ -455,7 +442,7 @@ class Scope: public ZoneObject {
   // Convenience variable.
   Variable* receiver_;
   // Function variable, if any; function scopes only.
-  VariableProxy* function_;
+  VariableDeclaration* function_;
   // Convenience variable; function scopes only.
   Variable* arguments_;
   // Interface; module scopes only.
@@ -475,8 +462,6 @@ class Scope: public ZoneObject {
   bool scope_calls_eval_;
   // The language mode of this scope.
   LanguageMode language_mode_;
-  // This scope is a qml mode scope.
-  QmlModeFlag qml_mode_flag_;
   // Source positions.
   int start_position_;
   int end_position_;
index 01d5f1c..cf8e5e1 100644 (file)
@@ -244,7 +244,7 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
     "Isolate::" #hacker_name "_address",
     FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL)
     NULL
-#undef C
+#undef BUILD_NAME_LITERAL
   };
 
   for (uint16_t i = 0; i < Isolate::kIsolateAddressCount; ++i) {
index 55bf222..ed78fc7 100644 (file)
@@ -295,11 +295,27 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
 
 MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
   Address old_top = allocation_info_.top;
+#ifdef DEBUG
+  // If we are stressing compaction we waste some memory in new space
+  // in order to get more frequent GCs.
+  if (FLAG_stress_compaction && !HEAP->linear_allocation()) {
+    if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
+      int filler_size = size_in_bytes * 4;
+      for (int i = 0; i < filler_size; i += kPointerSize) {
+        *(reinterpret_cast<Object**>(old_top + i)) =
+            HEAP->one_pointer_filler_map();
+      }
+      old_top += filler_size;
+      allocation_info_.top += filler_size;
+    }
+  }
+#endif
+
   if (allocation_info_.limit - old_top < size_in_bytes) {
     return SlowAllocateRaw(size_in_bytes);
   }
 
-  Object* obj = HeapObject::FromAddress(allocation_info_.top);
+  Object* obj = HeapObject::FromAddress(old_top);
   allocation_info_.top += size_in_bytes;
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
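
The subtle fix in this hunk is the last change: once the stress-compaction filler may advance both old_top and allocation_info_.top, the object must be created at the saved old_top rather than at the already-advanced top. A toy bump-pointer allocator, purely illustrative and not the V8 class, shows the invariant:

    #include <cassert>
    #include <cstdint>

    struct AllocationInfo { uintptr_t top, limit; };

    // Bump allocation: the object lives at the old top; the new top points
    // one object past it.
    uintptr_t AllocateRaw(AllocationInfo* info, uintptr_t size_in_bytes) {
      uintptr_t old_top = info->top;
      // (A debug-only filler could advance old_top and info->top here.)
      if (info->limit - old_top < size_in_bytes) return 0;  // slow path
      info->top += size_in_bytes;
      return old_top;  // not info->top, which now points past the object
    }

    int main() {
      AllocationInfo info = {0x1000, 0x2000};
      assert(AllocateRaw(&info, 16) == 0x1000);
      assert(AllocateRaw(&info, 16) == 0x1010);
      assert(info.top == 0x1020);
    }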
 
index 6144464..a0c8f2c 100644 (file)
@@ -362,15 +362,22 @@ Address MemoryAllocator::AllocateAlignedMemory(size_t size,
   if (base == NULL) return NULL;
 
   if (executable == EXECUTABLE) {
-    CommitCodePage(&reservation, base, size);
+    if (!CommitCodePage(&reservation, base, size)) {
+      base = NULL;
+    }
   } else {
-    if (!reservation.Commit(base,
-                            size,
-                            executable == EXECUTABLE)) {
-      return NULL;
+    if (!reservation.Commit(base, size, false)) {
+      base = NULL;
     }
   }
 
+  if (base == NULL) {
+    // Failed to commit the body. Release the mapping and any partially
+    // committed regions inside it.
+    reservation.Release();
+    return NULL;
+  }
+
   controller->TakeControl(&reservation);
   return base;
 }
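
For readers unfamiliar with the reserve-then-commit idiom this fix tightens, here is a rough POSIX analogue; mmap/mprotect/munmap stand in for V8's VirtualMemory, so treat it as a sketch of the control flow, not the platform code:

    #include <cstddef>
    #include <sys/mman.h>

    void* AllocateAligned(size_t size, bool executable) {
      // Reserve address space without committing any of it.
      void* base = mmap(NULL, size, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (base == MAP_FAILED) return NULL;

      // Commit by granting access; a single failure path releases the whole
      // mapping so partially committed regions cannot leak.
      int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
      if (mprotect(base, size, prot) != 0) {
        munmap(base, size);
        return NULL;
      }
      return base;
    }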
@@ -2288,8 +2295,6 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
     first_unswept_page_ = p;
   }
 
-  heap()->LowerOldGenLimits(freed_bytes);
-
   heap()->FreeQueuedChunks();
 
   return IsSweepingComplete();
index 90f62f3..b0ecc5d 100644 (file)
@@ -1042,7 +1042,9 @@ class MemoryAllocator {
     return CodePageAreaEndOffset() - CodePageAreaStartOffset();
   }
 
-  static bool CommitCodePage(VirtualMemory* vm, Address start, size_t size);
+  MUST_USE_RESULT static bool CommitCodePage(VirtualMemory* vm,
+                                             Address start,
+                                             size_t size);
 
  private:
   Isolate* isolate_;
@@ -1520,6 +1522,10 @@ class PagedSpace : public Space {
     return size_in_bytes - wasted;
   }
 
+  void ResetFreeList() {
+    free_list_.Reset();
+  }
+
   // Set space allocation info.
   void SetTop(Address top, Address limit) {
     ASSERT(top == limit ||
@@ -2371,11 +2377,6 @@ class FixedSpace : public PagedSpace {
   // Prepares for a mark-compact GC.
   virtual void PrepareForMarkCompact();
 
- protected:
-  void ResetFreeList() {
-    free_list_.Reset();
-  }
-
  private:
   // The size of objects in this space.
   int object_size_in_bytes_;
index a464f7f..6115930 100644 (file)
@@ -237,10 +237,28 @@ function StringReplace(search, replace) {
                                                         replace);
       }
     } else {
-      return %StringReplaceRegExpWithString(subject,
-                                            search,
-                                            TO_STRING_INLINE(replace),
-                                            lastMatchInfo);
+      if (lastMatchInfoOverride == null) {
+        return %StringReplaceRegExpWithString(subject,
+                                              search,
+                                              TO_STRING_INLINE(replace),
+                                              lastMatchInfo);
+      } else {
+        // We use this hack to detect whether StringReplaceRegExpWithString
+        // found at least one hit.  In that case we need to remove any
+        // override.
+        var saved_subject = lastMatchInfo[LAST_SUBJECT_INDEX];
+        lastMatchInfo[LAST_SUBJECT_INDEX] = 0;
+        var answer = %StringReplaceRegExpWithString(subject,
+                                                    search,
+                                                    TO_STRING_INLINE(replace),
+                                                    lastMatchInfo);
+        if (%_IsSmi(lastMatchInfo[LAST_SUBJECT_INDEX])) {
+          lastMatchInfo[LAST_SUBJECT_INDEX] = saved_subject;
+        } else {
+          lastMatchInfoOverride = null;
+        }
+        return answer;
+      }
     }
   }
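
The smi-sentinel trick above distills to: plant a value of a type the callee only replaces on success, then test the type afterwards. A compact C++ rendering, with std::variant as a stand-in for the smi-versus-string distinction (illustrative only):

    #include <cassert>
    #include <string>
    #include <variant>

    int main() {
      using Slot = std::variant<int, std::string>;   // smi or string
      Slot slot = std::string("previous subject");
      Slot saved = slot;
      slot = 0;                                      // plant the smi sentinel

      bool found_match = true;                       // pretend a hit occurred
      if (found_match) slot = std::string("new subject");  // callee overwrites

      if (std::holds_alternative<int>(slot)) {
        slot = saved;        // sentinel survived: no hit, restore the subject
      } else {
        // a hit occurred: the override is stale and must be dropped
      }
      assert(std::get<std::string>(slot) == "new subject");
    }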
 
@@ -259,47 +277,34 @@ function StringReplace(search, replace) {
   if (start < 0) return subject;
   var end = start + search.length;
 
-  var builder = new ReplaceResultBuilder(subject);
-  // prefix
-  builder.addSpecialSlice(0, start);
+  var result = SubString(subject, 0, start);
 
   // Compute the string to replace with.
   if (IS_SPEC_FUNCTION(replace)) {
     var receiver = %GetDefaultReceiver(replace);
-    builder.add(%_CallFunction(receiver,
-                               search,
-                               start,
-                               subject,
-                               replace));
+    result += %_CallFunction(receiver, search, start, subject, replace);
   } else {
     reusableMatchInfo[CAPTURE0] = start;
     reusableMatchInfo[CAPTURE1] = end;
     replace = TO_STRING_INLINE(replace);
-    ExpandReplacement(replace, subject, reusableMatchInfo, builder);
+    result = ExpandReplacement(replace, subject, reusableMatchInfo, result);
   }
 
-  // suffix
-  builder.addSpecialSlice(end, subject.length);
-
-  return builder.generate();
+  return result + SubString(subject, end, subject.length);
 }
 
 
 // Expand the $-expressions in the string and return a new string with
 // the result.
-function ExpandReplacement(string, subject, matchInfo, builder) {
+function ExpandReplacement(string, subject, matchInfo, result) {
   var length = string.length;
-  var builder_elements = builder.elements;
   var next = %StringIndexOf(string, '$', 0);
   if (next < 0) {
-    if (length > 0) builder_elements.push(string);
-    return;
+    if (length > 0) result += string;
+    return result;
   }
 
-  // Compute the number of captures; see ECMA-262, 15.5.4.11, p. 102.
-  var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;  // Includes the match.
-
-  if (next > 0) builder_elements.push(SubString(string, 0, next));
+  if (next > 0) result += SubString(string, 0, next);
 
   while (true) {
     var expansion = '$';
@@ -308,51 +313,21 @@ function ExpandReplacement(string, subject, matchInfo, builder) {
       var peek = %_StringCharCodeAt(string, position);
       if (peek == 36) {         // $$
         ++position;
-        builder_elements.push('$');
+        result += '$';
       } else if (peek == 38) {  // $& - match
         ++position;
-        builder.addSpecialSlice(matchInfo[CAPTURE0],
-                                matchInfo[CAPTURE1]);
+        result += SubString(subject, matchInfo[CAPTURE0], matchInfo[CAPTURE1]);
       } else if (peek == 96) {  // $` - prefix
         ++position;
-        builder.addSpecialSlice(0, matchInfo[CAPTURE0]);
+        result += SubString(subject, 0, matchInfo[CAPTURE0]);
       } else if (peek == 39) {  // $' - suffix
         ++position;
-        builder.addSpecialSlice(matchInfo[CAPTURE1], subject.length);
-      } else if (peek >= 48 && peek <= 57) {  // $n, 0 <= n <= 9
-        ++position;
-        var n = peek - 48;
-        if (position < length) {
-          peek = %_StringCharCodeAt(string, position);
-          // $nn, 01 <= nn <= 99
-          if (n != 0 && peek == 48 || peek >= 49 && peek <= 57) {
-            var nn = n * 10 + (peek - 48);
-            if (nn < m) {
-              // If the two digit capture reference is within range of
-              // the captures, we use it instead of the single digit
-              // one. Otherwise, we fall back to using the single
-              // digit reference. This matches the behavior of
-              // SpiderMonkey.
-              ++position;
-              n = nn;
-            }
-          }
-        }
-        if (0 < n && n < m) {
-          addCaptureString(builder, matchInfo, n);
-        } else {
-          // Because of the captures range check in the parsing of two
-          // digit capture references, we can only enter here when a
-          // single digit capture reference is outside the range of
-          // captures.
-          builder_elements.push('$');
-          --position;
-        }
+        result += SubString(subject, matchInfo[CAPTURE1], subject.length);
       } else {
-        builder_elements.push('$');
+        result += '$';
       }
     } else {
-      builder_elements.push('$');
+      result += '$';
     }
 
     // Go to the next $ in the string.
@@ -362,16 +337,17 @@ function ExpandReplacement(string, subject, matchInfo, builder) {
     // haven't reached the end, we need to append the suffix.
     if (next < 0) {
       if (position < length) {
-        builder_elements.push(SubString(string, position, length));
+        result += SubString(string, position, length);
       }
-      return;
+      return result;
     }
 
     // Append substring between the previous and the next $ character.
     if (next > position) {
-      builder_elements.push(SubString(string, position, next));
+      result += SubString(string, position, next);
     }
   }
+  return result;
 }
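
As a quick reference for the four expansion cases kept above, consider subject "abcde" with the match "cd" occupying [2, 4); this worked example is not part of the patch. The $n/$nn branches could be dropped because ExpandReplacement is now only reached for plain string searches, which carry no captures.

    #include <cassert>
    #include <string>

    int main() {
      std::string subject = "abcde";
      int start = 2, end = 4;  // CAPTURE0 / CAPTURE1 for the match "cd"
      assert(subject.substr(start, end - start) == "cd");  // $&  match
      assert(subject.substr(0, start) == "ab");            // $`  prefix
      assert(subject.substr(end) == "e");                  // $'  suffix
      // "$$" always expands to a literal "$".
    }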
 
 
@@ -388,18 +364,6 @@ function CaptureString(string, lastCaptureInfo, index) {
 }
 
 
-// Add the string of a given regular expression capture to the
-// ReplaceResultBuilder
-function addCaptureString(builder, matchInfo, index) {
-  // Scale the index.
-  var scaled = index << 1;
-  // Compute start and end.
-  var start = matchInfo[CAPTURE(scaled)];
-  if (start < 0) return;
-  var end = matchInfo[CAPTURE(scaled + 1)];
-  builder.addSpecialSlice(start, end);
-}
-
 // TODO(lrn): This array will survive indefinitely if replace is never
 // called again. However, it will be empty, since the contents are cleared
 // in the finally block.
@@ -429,14 +393,22 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
     return subject;
   }
   var len = res.length;
-  var i = 0;
   if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) {
+    // If the number of captures is two then there are no explicit captures in
+    // the regexp, just the implicit capture that captures the whole match.  In
+    // this case we can simplify quite a bit and end up with something faster.
+    // The builder will consist of some integers that indicate slices of the
+    // input string and some replacements that were returned from the replace
+    // function.
     var match_start = 0;
     var override = new InternalArray(null, 0, subject);
     var receiver = %GetDefaultReceiver(replace);
-    while (i < len) {
+    for (var i = 0; i < len; i++) {
       var elem = res[i];
       if (%_IsSmi(elem)) {
+        // Integers represent slices of the original string.  Use these to
+        // get the offsets we need for the override array (so things like
+        // RegExp.leftContext work during the callback function).
         if (elem > 0) {
           match_start = (elem >> 11) + (elem & 0x7ff);
         } else {
@@ -448,23 +420,25 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
         lastMatchInfoOverride = override;
         var func_result =
             %_CallFunction(receiver, elem, match_start, subject, replace);
+        // Overwrite the i'th element in the results with the string we got
+        // back from the callback function.
         res[i] = TO_STRING_INLINE(func_result);
         match_start += elem.length;
       }
-      i++;
     }
   } else {
     var receiver = %GetDefaultReceiver(replace);
-    while (i < len) {
+    for (var i = 0; i < len; i++) {
       var elem = res[i];
       if (!%_IsSmi(elem)) {
         // elem must be an Array.
         // Use the apply argument as backing for global RegExp properties.
         lastMatchInfoOverride = elem;
         var func_result = %Apply(replace, receiver, elem, 0, elem.length);
+        // Overwrite the i'th element in the results with the string we got
+        // back from the callback function.
         res[i] = TO_STRING_INLINE(func_result);
       }
-      i++;
     }
   }
   var resultBuilder = new ReplaceResultBuilder(subject, res);
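
The bit arithmetic on elem packs a slice's start position into the high bits and its length into the low 11 bits, so start plus length recovers the position just past the slice. A hypothetical decoder mirroring the expression (elem >> 11) + (elem & 0x7ff):

    #include <cassert>
    #include <cstdint>

    struct Slice { int32_t start, length; };

    // Assumes the packed form is (start << 11) | length with length < 2048.
    Slice DecodeSlice(int32_t elem) {
      return { elem >> 11, elem & 0x7ff };
    }

    int main() {
      int32_t packed = (5 << 11) | 3;   // slice covering [5, 8)
      Slice s = DecodeSlice(packed);
      assert(s.start == 5 && s.length == 3);
      assert(s.start + s.length == 8);  // the match_start computed above
    }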
@@ -478,9 +452,8 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
 function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
   var matchInfo = DoRegExpExec(regexp, subject, 0);
   if (IS_NULL(matchInfo)) return subject;
-  var result = new ReplaceResultBuilder(subject);
   var index = matchInfo[CAPTURE0];
-  result.addSpecialSlice(0, index);
+  var result = SubString(subject, 0, index);
   var endOfMatch = matchInfo[CAPTURE1];
   // Compute the parameter list consisting of the match, captures, index,
   // and subject for the replace function invocation.
@@ -492,8 +465,7 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
     // No captures, only the match, which is always valid.
     var s = SubString(subject, index, endOfMatch);
     // Don't call directly to avoid exposing the built-in global object.
-    replacement =
-        %_CallFunction(receiver, s, index, subject, replace);
+    replacement = %_CallFunction(receiver, s, index, subject, replace);
   } else {
     var parameters = new InternalArray(m + 2);
     for (var j = 0; j < m; j++) {
@@ -505,11 +477,10 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
     replacement = %Apply(replace, receiver, parameters, 0, j + 2);
   }
 
-  result.add(replacement);  // The add method converts to string if necessary.
+  result += replacement;  // The concatenation converts to string if necessary.
   // Can't use matchInfo any more from here, since the function could
   // overwrite it.
-  result.addSpecialSlice(endOfMatch, subject.length);
-  return result.generate();
+  return result + SubString(subject, endOfMatch, subject.length);
 }
 
 
index b8c0058..bd7163a 100644 (file)
@@ -763,8 +763,7 @@ Handle<Code> StubCache::ComputeCallPreMonomorphic(
 
 Handle<Code> StubCache::ComputeCallNormal(int argc,
                                           Code::Kind kind,
-                                          Code::ExtraICState extra_state,
-                                          bool has_qml_global_receiver) {
+                                          Code::ExtraICState extra_state) {
   Code::Flags flags =
       Code::ComputeFlags(kind, MONOMORPHIC, extra_state, NORMAL, argc);
   Handle<UnseededNumberDictionary> cache =
@@ -773,7 +772,7 @@ Handle<Code> StubCache::ComputeCallNormal(int argc,
   if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
 
   StubCompiler compiler(isolate_);
-  Handle<Code> code = compiler.CompileCallNormal(flags, has_qml_global_receiver);
+  Handle<Code> code = compiler.CompileCallNormal(flags);
   FillCache(isolate_, code);
   return code;
 }
@@ -940,7 +939,8 @@ void StubCache::CollectMatchingMaps(SmallMapList* types,
 RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty) {
   ASSERT(args[0]->IsJSObject());
   ASSERT(args[1]->IsJSObject());
-  AccessorInfo* callback = AccessorInfo::cast(args[3]);
+  ASSERT(args[3]->IsSmi());
+  AccessorInfo* callback = AccessorInfo::cast(args[4]);
   Address getter_address = v8::ToCData<Address>(callback->getter());
   v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
   ASSERT(fun != NULL);
@@ -951,7 +951,7 @@ RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty) {
     // Leaving JavaScript.
     VMState state(isolate, EXTERNAL);
     ExternalCallbackScope call_scope(isolate, getter_address);
-    result = fun(v8::Utils::ToLocal(args.at<String>(4)), info);
+    result = fun(v8::Utils::ToLocal(args.at<String>(5)), info);
   }
   RETURN_IF_SCHEDULED_EXCEPTION(isolate);
   if (result.IsEmpty()) return HEAP->undefined_value();
@@ -998,7 +998,8 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
   ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
   ASSERT(args[2]->IsJSObject());  // Receiver.
   ASSERT(args[3]->IsJSObject());  // Holder.
-  ASSERT(args.length() == 5);  // Last arg is data object.
+  ASSERT(args[5]->IsSmi());  // Isolate.
+  ASSERT(args.length() == 6);
 
   Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
   v8::NamedPropertyGetter getter =
@@ -1051,7 +1052,7 @@ static MaybeObject* LoadWithInterceptor(Arguments* args,
   ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
   Handle<JSObject> receiver_handle = args->at<JSObject>(2);
   Handle<JSObject> holder_handle = args->at<JSObject>(3);
-  ASSERT(args->length() == 5);  // Last arg is data object.
+  ASSERT(args->length() == 6);
 
   Isolate* isolate = receiver_handle->GetIsolate();
 
@@ -1178,15 +1179,13 @@ Handle<Code> StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
 }
 
 
-Handle<Code> StubCompiler::CompileCallNormal(Code::Flags flags, bool has_qml_global_receiver) {
+Handle<Code> StubCompiler::CompileCallNormal(Code::Flags flags) {
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    // Call normal is always with a explict receiver,
-    // or with an implicit qml global receiver.
+    // Call normal is always with an explicit receiver.
     ASSERT(!CallIC::Contextual::decode(
-        Code::ExtractExtraICStateFromFlags(flags)) ||
-        has_qml_global_receiver);
+        Code::ExtractExtraICStateFromFlags(flags)));
     CallIC::GenerateNormal(masm(), argc);
   } else {
     KeyedCallIC::GenerateNormal(masm(), argc);
index 206dddd..29bdb61 100644 (file)
@@ -219,8 +219,7 @@ class StubCache {
 
   Handle<Code> ComputeCallNormal(int argc,
                                  Code::Kind kind,
-                                 Code::ExtraICState state,
-                                 bool has_qml_global_receiver);
+                                 Code::ExtraICState state);
 
   Handle<Code> ComputeCallArguments(int argc, Code::Kind kind);
 
@@ -411,7 +410,7 @@ class StubCompiler BASE_EMBEDDED {
   // is extracted from the code flags.
   Handle<Code> CompileCallInitialize(Code::Flags flags);
   Handle<Code> CompileCallPreMonomorphic(Code::Flags flags);
-  Handle<Code> CompileCallNormal(Code::Flags flags, bool has_qml_global_receiver);
+  Handle<Code> CompileCallNormal(Code::Flags flags);
   Handle<Code> CompileCallMegamorphic(Code::Flags flags);
   Handle<Code> CompileCallArguments(Code::Flags flags);
   Handle<Code> CompileCallMiss(Code::Flags flags);
index 89ef4c6..7e8c088 100644 (file)
@@ -89,4 +89,19 @@ char* SimpleStringBuilder::Finalize() {
   return buffer_.start();
 }
 
+
+const DivMagicNumbers DivMagicNumberFor(int32_t divisor) {
+  switch (divisor) {
+    case 3:    return DivMagicNumberFor3;
+    case 5:    return DivMagicNumberFor5;
+    case 7:    return DivMagicNumberFor7;
+    case 9:    return DivMagicNumberFor9;
+    case 11:   return DivMagicNumberFor11;
+    case 25:   return DivMagicNumberFor25;
+    case 125:  return DivMagicNumberFor125;
+    case 625:  return DivMagicNumberFor625;
+    default:   return InvalidDivMagicNumber;
+  }
+}
+
 } }  // namespace v8::internal
index 1d40c98..f116c14 100644 (file)
@@ -85,6 +85,32 @@ inline int WhichPowerOf2(uint32_t x) {
 }
 
 
+// Magic numbers for integer division.
+// These are, in effect, 2's complement reciprocals of the divisors.
+// Details and proofs can be found in:
+// - Hacker's Delight, Henry S. Warren, Jr.
+// - The PowerPC Compiler Writer’s Guide
+// and probably many others.
+// See details in the implementation of the algorithm in
+// lithium-codegen-arm.cc : LCodeGen::TryEmitSignedIntegerDivisionByConstant().
+struct DivMagicNumbers {
+  unsigned M;
+  unsigned s;
+};
+
+const DivMagicNumbers InvalidDivMagicNumber = {0, 0};
+const DivMagicNumbers DivMagicNumberFor3   = {0x55555556, 0};
+const DivMagicNumbers DivMagicNumberFor5   = {0x66666667, 1};
+const DivMagicNumbers DivMagicNumberFor7   = {0x92492493, 2};
+const DivMagicNumbers DivMagicNumberFor9   = {0x38e38e39, 1};
+const DivMagicNumbers DivMagicNumberFor11  = {0x2e8ba2e9, 1};
+const DivMagicNumbers DivMagicNumberFor25  = {0x51eb851f, 3};
+const DivMagicNumbers DivMagicNumberFor125 = {0x10624dd3, 3};
+const DivMagicNumbers DivMagicNumberFor625 = {0x68db8bad, 8};
+
+const DivMagicNumbers DivMagicNumberFor(int32_t divisor);
+
+
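
To make the {M, s} pairs concrete, here is a minimal sketch of signed division by a tabled constant using a multiply-high and shifts, following Hacker's Delight rather than V8's actual Lithium codegen; the M-negative correction covers entries such as DivMagicNumberFor7, whose magic number does not fit in a signed 32-bit value:

    #include <cassert>
    #include <cstdint>

    // Computes n / d without a division instruction, for the divisors above.
    int32_t MagicDivide(int32_t n, uint32_t M, unsigned s, int32_t d) {
      int64_t product = static_cast<int64_t>(static_cast<int32_t>(M)) * n;
      int32_t q = static_cast<int32_t>(product >> 32);   // multiply-high
      if (d > 0 && static_cast<int32_t>(M) < 0) q += n;  // e.g. divisor 7
      q >>= s;
      q += static_cast<uint32_t>(n) >> 31;  // truncate toward zero for n < 0
      return q;
    }

    int main() {
      assert(MagicDivide(7, 0x66666667, 1, 5) == 7 / 5);
      assert(MagicDivide(-7, 0x66666667, 1, 5) == -7 / 5);
      assert(MagicDivide(100, 0x92492493, 2, 7) == 100 / 7);
    }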
 // The C++ standard leaves the semantics of '>>' undefined for
 // negative signed operands. Most implementations do the right thing,
 // though.
index 45036c8..2910a07 100644 (file)
@@ -118,6 +118,8 @@ void V8::TearDown() {
 
   delete call_completed_callbacks_;
   call_completed_callbacks_ = NULL;
+
+  OS::TearDown();
 }
 
 
@@ -248,7 +250,6 @@ Object* V8::FillHeapNumberWithRandom(Object* heap_number,
 }
 
 void V8::InitializeOncePerProcessImpl() {
-  // Set up the platform OS support.
   OS::SetUp();
 
   use_crankshaft_ = FLAG_crankshaft;
index bfc5e23..6a1766a 100644 (file)
@@ -48,6 +48,10 @@ const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
 const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
 const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
 
+// Desired alignment for double values.
+const intptr_t kDoubleAlignment = 8;
+const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
+
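
The mask form makes alignment checks and round-ups single expressions; for illustration:

    #include <cassert>
    #include <cstdint>

    const intptr_t kDoubleAlignment = 8;
    const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;

    // Rounds an address up to the next 8-byte boundary.
    intptr_t AlignToDouble(intptr_t addr) {
      return (addr + kDoubleAlignmentMask) & ~kDoubleAlignmentMask;
    }

    int main() {
      assert(AlignToDouble(13) == 16);
      assert(AlignToDouble(16) == 16);
      assert((AlignToDouble(13) & kDoubleAlignmentMask) == 0);
    }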
 // Desired alignment for maps.
 #if V8_HOST_ARCH_64_BIT
 const intptr_t kMapAlignmentBits = kObjectAlignmentBits;
index c73222a..bb587e1 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -199,10 +199,13 @@ Vector<const char> ReadFile(FILE* file,
                             bool verbose = true);
 
 
-
 // Copy from ASCII/16bit chars to ASCII/16bit chars.
 template <typename sourcechar, typename sinkchar>
-inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
+INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, int chars));
+
+
+template <typename sourcechar, typename sinkchar>
+void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
   sinkchar* limit = dest + chars;
 #ifdef V8_HOST_CAN_READ_UNALIGNED
   if (sizeof(*dest) == sizeof(*src)) {
index 4984cbd..32ad5bc 100644 (file)
@@ -73,8 +73,7 @@ Variable::Variable(Scope* scope,
     force_context_allocation_(false),
     is_used_(false),
     initialization_flag_(initialization_flag),
-    interface_(interface),
-    is_qml_global_(false) {
+    interface_(interface) {
   // Names must be canonicalized for fast equality checks.
   ASSERT(name->IsSymbol());
   // Var declared variables never need initialization.
index 43b2c81..f49b6e1 100644 (file)
@@ -164,9 +164,6 @@ class Variable: public ZoneObject {
 
   static int CompareIndex(Variable* const* v, Variable* const* w);
 
-  bool is_qml_global() const { return is_qml_global_; }
-  void set_is_qml_global(bool is_qml_global) { is_qml_global_ = is_qml_global; }
-
  private:
   Scope* scope_;
   Handle<String> name_;
@@ -192,9 +189,6 @@ class Variable: public ZoneObject {
 
   // Module type info.
   Interface* interface_;
-
-  // QML info
-  bool is_qml_global_;
 };
 
 
index 6b07103..79b6ebd 100644 (file)
@@ -33,8 +33,8 @@
 // NOTE these macros are used by the SCons build script so their names
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
-#define MINOR_VERSION     10
-#define BUILD_NUMBER      1
+#define MINOR_VERSION     11
+#define BUILD_NUMBER      4
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
index 60b29e6..9f5f850 100644 (file)
@@ -629,7 +629,8 @@ class Assembler : public AssemblerBase {
   static const byte kJccShortPrefix = 0x70;
   static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
   static const byte kJcShortOpcode = kJccShortPrefix | carry;
-
+  static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
+  static const byte kJzShortOpcode = kJccShortPrefix | zero;
 
 
   // ---------------------------------------------------------------------------
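
The new constants follow the x86 short-form Jcc encoding: the opcode is the 0x70 prefix OR'd with the 4-bit condition code, and since zero is 4 and not_zero is 5 this yields the familiar encodings 0x74 (jz rel8) and 0x75 (jnz rel8). A quick sanity check:

    #include <cassert>

    int main() {
      const unsigned char kJccShortPrefix = 0x70;
      const unsigned char zero = 4, not_zero = 5;    // x86 condition codes
      assert((kJccShortPrefix | zero) == 0x74);      // jz  rel8
      assert((kJccShortPrefix | not_zero) == 0x75);  // jnz rel8
    }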
index 63c44e9..d179d2a 100644 (file)
@@ -139,10 +139,6 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
   __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
 
-  // Copy the qmlglobal object from the previous context.
-  __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
-  __ movq(Operand(rax, Context::SlotOffset(Context::QML_GLOBAL_INDEX)), rbx);
-
   // Initialize the rest of the slots to undefined.
   __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
   for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
@@ -207,10 +203,6 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
   __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_INDEX));
   __ movq(ContextOperand(rax, Context::GLOBAL_INDEX), rbx);
 
-  // Copy the qmlglobal object from the previous context.
-  __ movq(rbx, ContextOperand(rsi, Context::QML_GLOBAL_INDEX));
-  __ movq(ContextOperand(rax, Context::QML_GLOBAL_INDEX), rbx);
-
   // Initialize the rest of the slots to the hole value.
   __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
   for (int i = 0; i < slots_; i++) {
@@ -3328,37 +3320,6 @@ void CompareStub::Generate(MacroAssembler* masm) {
   // NOTICE! This code is only reached after a smi-fast-case check, so
   // it is certain that at least one operand isn't a smi.
 
-  {
-    Label not_user_equal, user_equal;
-    __ JumpIfSmi(rax, &not_user_equal);
-    __ JumpIfSmi(rdx, &not_user_equal);
-
-    __ CmpObjectType(rax, JS_OBJECT_TYPE, rbx);
-    __ j(not_equal, &not_user_equal);
-
-    __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
-    __ j(not_equal, &not_user_equal);
-
-    __ testb(FieldOperand(rbx, Map::kBitField2Offset),
-             Immediate(1 << Map::kUseUserObjectComparison));
-    __ j(not_zero, &user_equal);
-    __ testb(FieldOperand(rcx, Map::kBitField2Offset),
-             Immediate(1 << Map::kUseUserObjectComparison));
-    __ j(not_zero, &user_equal);
-
-    __ jmp(&not_user_equal);
-
-    __ bind(&user_equal);
-   
-    __ pop(rbx); // Return address.
-    __ push(rax);
-    __ push(rdx);
-    __ push(rbx);
-    __ TailCallRuntime(Runtime::kUserObjectEquals, 2, 1);
-   
-    __ bind(&not_user_equal);
-  }
-
   // Two identical objects are equal unless they are both NaN or undefined.
   {
     Label not_identical;
@@ -3667,8 +3628,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
 
 
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  // rdi : the function to call
   // rbx : cache cell for call target
+  // rdi : the function to call
+  Isolate* isolate = masm->isolate();
   Label slow, non_function;
 
   // The receiver might implicitly be the global object. This is
@@ -3683,9 +3645,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
     __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
     __ j(not_equal, &call, Label::kNear);
     // Patch the receiver on the stack with the global receiver object.
-    __ movq(rbx, GlobalObjectOperand());
-    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
-    __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rbx);
+    __ movq(rcx, GlobalObjectOperand());
+    __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
+    __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rcx);
     __ bind(&call);
   }
 
@@ -3695,6 +3657,10 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
   __ j(not_equal, &slow);
 
+  if (RecordCallTarget()) {
+    GenerateRecordCallTarget(masm);
+  }
+
   // Fast-case: Just invoke the function.
   ParameterCount actual(argc_);
 
@@ -3717,6 +3683,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
 
   // Slow-case: Non-function called.
   __ bind(&slow);
+  if (RecordCallTarget()) {
+    // If there is a call target cache, mark it megamorphic in the
+    // non-function case.  MegamorphicSentinel is an immortal immovable
+    // object (undefined) so no write barrier is needed.
+    __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+            TypeFeedbackCells::MegamorphicSentinel(isolate));
+  }
   // Check for function proxy.
   __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
   __ j(not_equal, &non_function);
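
The sentinel write in the slow path is one transition in the call-target feedback cell's small state machine: uninitialized at first, monomorphic once a single function has been recorded, and megamorphic as soon as the target changes or a non-function arrives. A schematic with hypothetical types, not the stub's actual cell representation:

    #include <cassert>

    enum class CellState { kUninitialized, kMonomorphic, kMegamorphic };

    struct FeedbackCell {
      CellState state = CellState::kUninitialized;
      const void* target = nullptr;

      void Record(const void* fn) {  // fn == nullptr models a non-function
        if (state == CellState::kMegamorphic) return;
        if (fn != nullptr && state == CellState::kUninitialized) {
          state = CellState::kMonomorphic;  // remember the first target
          target = fn;
        } else if (fn == nullptr || target != fn) {
          state = CellState::kMegamorphic;  // MegamorphicSentinel in the stub
          target = nullptr;
        }
      }
    };

    int main() {
      FeedbackCell cell;
      int f = 0, g = 0;
      cell.Record(&f);
      assert(cell.state == CellState::kMonomorphic && cell.target == &f);
      cell.Record(&f);   // same target: still monomorphic
      assert(cell.state == CellState::kMonomorphic);
      cell.Record(&g);   // new target: degrade to megamorphic
      assert(cell.state == CellState::kMegamorphic);
    }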
@@ -5151,56 +5124,24 @@ void SubStringStub::Generate(MacroAssembler* masm) {
   // rax: string
   // rbx: instance type
   // Calculate length of sub string using the smi values.
-  Label result_longer_than_two;
   __ movq(rcx, Operand(rsp, kToOffset));
   __ movq(rdx, Operand(rsp, kFromOffset));
   __ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
 
   __ SmiSub(rcx, rcx, rdx);  // Overflow doesn't happen.
-  __ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
+  __ cmpq(rcx, FieldOperand(rax, String::kLengthOffset));
   Label not_original_string;
-  __ j(not_equal, &not_original_string, Label::kNear);
+  // Shorter than original string's length: an actual substring.
+  __ j(below, &not_original_string, Label::kNear);
+  // Longer than original string's length or negative: unsafe arguments.
+  __ j(above, &runtime);
+  // Return original string.
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->sub_string_native(), 1);
   __ ret(kArgumentsSize);
   __ bind(&not_original_string);
-  // Special handling of sub-strings of length 1 and 2. One character strings
-  // are handled in the runtime system (looked up in the single character
-  // cache). Two character strings are looked for in the symbol cache.
   __ SmiToInteger32(rcx, rcx);
-  __ cmpl(rcx, Immediate(2));
-  __ j(greater, &result_longer_than_two);
-  __ j(less, &runtime);
-
-  // Sub string of length 2 requested.
-  // rax: string
-  // rbx: instance type
-  // rcx: sub string length (value is 2)
-  // rdx: from index (smi)
-  __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &runtime);
-
-  // Get the two characters forming the sub string.
-  __ SmiToInteger32(rdx, rdx);  // From index is no longer smi.
-  __ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
-  __ movzxbq(rdi,
-             FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1));
-
-  // Try to lookup two character string in symbol table.
-  Label make_two_character_string;
-  StringHelper::GenerateTwoCharacterSymbolTableProbe(
-      masm, rbx, rdi, r9, r11, r14, r15, &make_two_character_string);
-  __ IncrementCounter(counters->sub_string_native(), 1);
-  __ ret(3 * kPointerSize);
-
-  __ bind(&make_two_character_string);
-  // Set up registers for allocating the two character string.
-  __ movzxwq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
-  __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
-  __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);
-  __ IncrementCounter(counters->sub_string_native(), 1);
-  __ ret(3 * kPointerSize);
 
-  __ bind(&result_longer_than_two);
   // rax: string
   // rbx: instance type
   // rcx: sub string length
@@ -5756,14 +5697,8 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
 
   __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
   __ j(not_equal, &miss, Label::kNear);
-  __ testb(FieldOperand(rcx, Map::kBitField2Offset),
-           Immediate(1 << Map::kUseUserObjectComparison));
-  __ j(not_zero, &miss, Label::kNear);
   __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
   __ j(not_equal, &miss, Label::kNear);
-  __ testb(FieldOperand(rcx, Map::kBitField2Offset),
-           Immediate(1 << Map::kUseUserObjectComparison));
-  __ j(not_zero, &miss, Label::kNear);
 
   ASSERT(GetCondition() == equal);
   __ subq(rax, rdx);
@@ -5783,14 +5718,8 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
   __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
   __ Cmp(rcx, known_map_);
   __ j(not_equal, &miss, Label::kNear);
-  __ testb(FieldOperand(rcx, Map::kBitField2Offset),
-           Immediate(1 << Map::kUseUserObjectComparison));
-  __ j(not_zero, &miss, Label::kNear);
   __ Cmp(rbx, known_map_);
   __ j(not_equal, &miss, Label::kNear);
-  __ testb(FieldOperand(rbx, Map::kBitField2Offset),
-           Immediate(1 << Map::kUseUserObjectComparison));
-  __ j(not_zero, &miss, Label::kNear);
 
   __ subq(rax, rdx);
   __ ret(0);
index eec83d9..1b29e58 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -91,6 +91,8 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
   rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
 }
 
+const bool Debug::FramePaddingLayout::kIsSupported = true;
+
 
 #define __ ACCESS_MASM(masm)
 
@@ -103,6 +105,12 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
+    // Load padding words on stack.
+    for (int i = 0; i < Debug::FramePaddingLayout::kInitialSize; i++) {
+      __ Push(Smi::FromInt(Debug::FramePaddingLayout::kPaddingValue));
+    }
+    __ Push(Smi::FromInt(Debug::FramePaddingLayout::kInitialSize));
+
     // Store the registers containing live values on the expression stack to
     // make sure that these are correctly updated during GC. Non-object values
     // are stored as two smis, causing them to be untouched by GC.
@@ -157,6 +165,11 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
       }
     }
 
+    // Read current padding counter and skip corresponding number of words.
+    __ pop(kScratchRegister);
+    __ SmiToInteger32(kScratchRegister, kScratchRegister);
+    __ lea(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
+
     // Get rid of the internal frame.
   }
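
In miniature, the padding protocol works like this: the prologue pushes a fixed number of reserved words followed by a counter smi, and the epilogue pops the counter and discards exactly that many words, which stays correct even if the debugger later resizes the padding. A toy stack model with illustrative constants (the real values live in Debug::FramePaddingLayout):

    #include <cassert>
    #include <vector>

    int main() {
      const int kInitialSize = 2;    // stand-in for kInitialSize
      const int kPaddingValue = -1;  // stand-in for kPaddingValue
      std::vector<int> stack;

      // Prologue: padding words, then the counter on top.
      for (int i = 0; i < kInitialSize; i++) stack.push_back(kPaddingValue);
      stack.push_back(kInitialSize);

      // ... the frame body is pushed and popped in between ...

      // Epilogue: pop the counter, then skip that many words (the lea on rsp).
      int counter = stack.back();
      stack.pop_back();
      stack.resize(stack.size() - counter);
      assert(stack.empty());
    }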
 
index 40b9a1c..f3046b9 100644 (file)
@@ -458,6 +458,8 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
 
 void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
                                               int frame_index) {
+  Builtins* builtins = isolate_->builtins();
+  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
   JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
   unsigned height = iterator->Next();
   unsigned height_in_bytes = height * kPointerSize;
@@ -465,7 +467,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
     PrintF("  translating construct stub => height=%d\n", height_in_bytes);
   }
 
-  unsigned fixed_frame_size = 6 * kPointerSize;
+  unsigned fixed_frame_size = 7 * kPointerSize;
   unsigned output_frame_size = height_in_bytes + fixed_frame_size;
 
   // Allocate and store the output frame description.
@@ -534,6 +536,16 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
            top_address + output_offset, output_offset, value);
   }
 
+  // The output frame reflects a JSConstructStubGeneric frame.
+  output_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(construct_stub);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+           V8PRIxPTR " ; code object\n",
+           top_address + output_offset, output_offset, value);
+  }
+
   // Number of incoming arguments.
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
@@ -557,8 +569,6 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
 
   ASSERT(0 == output_offset);
 
-  Builtins* builtins = isolate_->builtins();
-  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
   intptr_t pc = reinterpret_cast<intptr_t>(
       construct_stub->instruction_start() +
       isolate_->heap()->construct_stub_deopt_pc_offset()->value());
index a0218f5..974269e 100644 (file)
@@ -171,13 +171,12 @@ void FullCodeGenerator::Generate() {
 
   // Possibly allocate a local context.
   int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-  if (heap_slots > 0 ||
-      (scope()->is_qml_mode() && scope()->is_global_scope())) {
+  if (heap_slots > 0) {
     Comment cmnt(masm_, "[ Allocate local context");
     // Argument to NewContext is the function, which is still in rdi.
     __ push(rdi);
     if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-      FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+      FastNewContextStub stub(heap_slots);
       __ CallStub(&stub);
     } else {
       __ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -258,11 +257,11 @@ void FullCodeGenerator::Generate() {
       // For named function expressions, declare the function name as a
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
-        VariableProxy* proxy = scope()->function();
-        ASSERT(proxy->var()->mode() == CONST ||
-               proxy->var()->mode() == CONST_HARMONY);
-        ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
-        EmitDeclaration(proxy, proxy->var()->mode(), NULL);
+        VariableDeclaration* function = scope()->function();
+        ASSERT(function->proxy()->var()->mode() == CONST ||
+               function->proxy()->var()->mode() == CONST_HARMONY);
+        ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+        VisitVariableDeclaration(function);
       }
       VisitDeclarations(scope()->declarations());
     }
@@ -754,61 +753,51 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
 }
 
 
-void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
-                                        VariableMode mode,
-                                        FunctionLiteral* function) {
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+  // The variable in the declaration always resides in the current function
+  // context.
+  ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+  if (FLAG_debug_code) {
+    // Check that we're not inside a with or catch context.
+    __ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
+    __ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
+    __ Check(not_equal, "Declaration in with context.");
+    __ CompareRoot(rbx, Heap::kCatchContextMapRootIndex);
+    __ Check(not_equal, "Declaration in catch context.");
+  }
+}
+
+
+void FullCodeGenerator::VisitVariableDeclaration(
+    VariableDeclaration* declaration) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
+  VariableProxy* proxy = declaration->proxy();
+  VariableMode mode = declaration->mode();
   Variable* variable = proxy->var();
-  bool binding_needs_init = (function == NULL) &&
-      (mode == CONST || mode == CONST_HARMONY || mode == LET);
+  bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
   switch (variable->location()) {
     case Variable::UNALLOCATED:
-      ++global_count_;
+      globals_->Add(variable->name());
+      globals_->Add(variable->binding_needs_init()
+                        ? isolate()->factory()->the_hole_value()
+                        : isolate()->factory()->undefined_value());
       break;
 
     case Variable::PARAMETER:
     case Variable::LOCAL:
-      if (function != NULL) {
-        Comment cmnt(masm_, "[ Declaration");
-        VisitForAccumulatorValue(function);
-        __ movq(StackOperand(variable), result_register());
-      } else if (binding_needs_init) {
-        Comment cmnt(masm_, "[ Declaration");
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
         __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
         __ movq(StackOperand(variable), kScratchRegister);
       }
       break;
 
     case Variable::CONTEXT:
-      // The variable in the decl always resides in the current function
-      // context.
-      ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
-      if (FLAG_debug_code) {
-        // Check that we're not inside a with or catch context.
-        __ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
-        __ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
-        __ Check(not_equal, "Declaration in with context.");
-        __ CompareRoot(rbx, Heap::kCatchContextMapRootIndex);
-        __ Check(not_equal, "Declaration in catch context.");
-      }
-      if (function != NULL) {
-        Comment cmnt(masm_, "[ Declaration");
-        VisitForAccumulatorValue(function);
-        __ movq(ContextOperand(rsi, variable->index()), result_register());
-        int offset = Context::SlotOffset(variable->index());
-        // We know that we have written a function, which is not a smi.
-        __ RecordWriteContextSlot(rsi,
-                                  offset,
-                                  result_register(),
-                                  rcx,
-                                  kDontSaveFPRegs,
-                                  EMIT_REMEMBERED_SET,
-                                  OMIT_SMI_CHECK);
-        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      } else if (binding_needs_init) {
-        Comment cmnt(masm_, "[ Declaration");
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
+        EmitDebugCheckDeclarationContext(variable);
         __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
         __ movq(ContextOperand(rsi, variable->index()), kScratchRegister);
         // No write barrier since the hole value is in old space.
@@ -817,14 +806,12 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
       break;
 
     case Variable::LOOKUP: {
-      Comment cmnt(masm_, "[ Declaration");
+      Comment cmnt(masm_, "[ VariableDeclaration");
       __ push(rsi);
       __ Push(variable->name());
       // Declaration nodes are always introduced in one of four modes.
-      ASSERT(mode == VAR ||
-             mode == CONST ||
-             mode == CONST_HARMONY ||
-             mode == LET);
+      ASSERT(mode == VAR || mode == LET ||
+             mode == CONST || mode == CONST_HARMONY);
       PropertyAttributes attr =
           (mode == CONST || mode == CONST_HARMONY) ? READ_ONLY : NONE;
       __ Push(Smi::FromInt(attr));
@@ -832,9 +819,7 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
       // Note: For variables we must not push an initial value (such as
       // 'undefined') because we may have a (legal) redeclaration and we
       // must not destroy the current value.
-      if (function != NULL) {
-        VisitForStackValue(function);
-      } else if (binding_needs_init) {
+      if (hole_init) {
         __ PushRoot(Heap::kTheHoleValueRootIndex);
       } else {
         __ Push(Smi::FromInt(0));  // Indicates no initial value.
@@ -846,6 +831,119 @@ void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
 }
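For LET, CONST, and CONST_HARMONY bindings, hole_init seeds the slot with the-hole so that a read before initialization is detectable at runtime. A conceptual sketch of the guard on the load side (LoadHoleCheckedBinding and ThrowReferenceError are illustrative names, not V8 APIs):

    Object* LoadHoleCheckedBinding(Object* slot_value) {
      if (slot_value->IsTheHole()) {
        // Declared but not yet initialized, e.g. reading x before
        // `let x = ...;` has executed.
        return ThrowReferenceError();  // hypothetical helper
      }
      return slot_value;
    }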
 
 
+void FullCodeGenerator::VisitFunctionDeclaration(
+    FunctionDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      globals_->Add(variable->name());
+      Handle<SharedFunctionInfo> function =
+          Compiler::BuildFunctionInfo(declaration->fun(), script());
+      // Check for stack-overflow exception.
+      if (function.is_null()) return SetStackOverflow();
+      globals_->Add(function);
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      VisitForAccumulatorValue(declaration->fun());
+      __ movq(StackOperand(variable), result_register());
+      break;
+    }
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      VisitForAccumulatorValue(declaration->fun());
+      __ movq(ContextOperand(rsi, variable->index()), result_register());
+      int offset = Context::SlotOffset(variable->index());
+      // We know that we have written a function, which is not a smi.
+      __ RecordWriteContextSlot(rsi,
+                                offset,
+                                result_register(),
+                                rcx,
+                                kDontSaveFPRegs,
+                                EMIT_REMEMBERED_SET,
+                                OMIT_SMI_CHECK);
+      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      break;
+    }
+
+    case Variable::LOOKUP: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      __ push(rsi);
+      __ Push(variable->name());
+      __ Push(Smi::FromInt(NONE));
+      VisitForStackValue(declaration->fun());
+      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+      break;
+    }
+  }
+}
+
+
+void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  Handle<JSModule> instance = declaration->module()->interface()->Instance();
+  ASSERT(!instance.is_null());
+
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      Comment cmnt(masm_, "[ ModuleDeclaration");
+      globals_->Add(variable->name());
+      globals_->Add(instance);
+      Visit(declaration->module());
+      break;
+    }
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ ModuleDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      __ Move(ContextOperand(rsi, variable->index()), instance);
+      Visit(declaration->module());
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::LOOKUP:
+      UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED:
+      // TODO(rossberg)
+      break;
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ ImportDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      // TODO(rossberg)
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::LOOKUP:
+      UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
+  // TODO(rossberg)
+}
+
+
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   __ push(rsi);  // The context is the first argument.
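The pairs array consumed here is the flat (name, value) list built by the declaration visitors above: even slots hold names, odd slots the initial value (the hole, undefined, a SharedFunctionInfo, or a module instance). A sketch of a consumer, assuming that two-slots-per-declaration layout:

    for (int i = 0; i < pairs->length(); i += 2) {
      Handle<String> name(String::cast(pairs->get(i)));
      Handle<Object> value(pairs->get(i + 1));
      // ... install `name` with `value` on the global object ...
    }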
@@ -1200,7 +1298,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
 
   // All extension objects were empty and it is safe to use a global
   // load IC call.
-  __ movq(rax, var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+  __ movq(rax, GlobalObjectOperand());
   __ Move(rcx, var->name());
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
@@ -1285,7 +1383,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
       // Use inline caching. Variable name is passed in rcx and the global
       // object on the stack.
       __ Move(rcx, var->name());
-      __ movq(rax, var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+      __ movq(rax, GlobalObjectOperand());
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
       CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
       context()->Plug(rax);
@@ -1912,7 +2010,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
   if (var->IsUnallocated()) {
     // Global var, const, or let.
     __ Move(rcx, var->name());
-    __ movq(rdx, var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+    __ movq(rdx, GlobalObjectOperand());
     Handle<Code> ic = is_classic_mode()
         ? isolate()->builtins()->StoreIC_Initialize()
         : isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2175,6 +2273,18 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
+
+  // Record call targets in unoptimized code, but not in the snapshot.
+  if (!Serializer::enabled()) {
+    flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
+    Handle<Object> uninitialized =
+        TypeFeedbackCells::UninitializedSentinel(isolate());
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+    RecordTypeFeedbackCell(expr->id(), cell);
+    __ Move(rbx, cell);
+  }
+
   CallFunctionStub stub(arg_count, flags);
   __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
   __ CallStub(&stub);
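Each unoptimized call site now owns a JSGlobalPropertyCell, seeded with the uninitialized sentinel and handed to the stub in rbx. A hedged sketch of how a stub might update the cell as targets are observed (MegamorphicSentinel is assumed alongside the UninitializedSentinel used above; the actual update policy lives in the stub, not in this diff):

    void UpdateFeedback(Isolate* isolate,
                        Handle<JSGlobalPropertyCell> cell,
                        Handle<JSFunction> callee) {
      if (cell->value() == *TypeFeedbackCells::UninitializedSentinel(isolate)) {
        cell->set_value(*callee);  // first target seen: go monomorphic
      } else if (cell->value() != *callee) {
        cell->set_value(*TypeFeedbackCells::MegamorphicSentinel(isolate));
      }
    }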
@@ -2203,11 +2313,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
   // Push the start position of the scope the call resides in.
   __ Push(Smi::FromInt(scope()->start_position()));
 
-  // Push the qml mode flag
-  __ Push(Smi::FromInt(is_qml_mode()));
-
   // Do the runtime call.
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
 }
 
 
@@ -2260,7 +2367,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
   } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
     // Call to a global variable.  Push global object as receiver for the
     // call IC lookup.
-    __ push(proxy->var()->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+    __ push(GlobalObjectOperand());
     EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
   } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
     // Call to a lookup slot (dynamically introduced variable).
@@ -3253,102 +3360,6 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
 }
 
 
-void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 3);
-  VisitForStackValue(args->at(0));
-  VisitForStackValue(args->at(1));
-  VisitForStackValue(args->at(2));
-  Label done;
-  Label slow_case;
-  Register object = rax;
-  Register index_1 = rbx;
-  Register index_2 = rcx;
-  Register elements = rdi;
-  Register temp = rdx;
-  __ movq(object, Operand(rsp, 2 * kPointerSize));
-  // Fetch the map and check if array is in fast case.
-  // Check that object doesn't require security checks and
-  // has no indexed interceptor.
-  __ CmpObjectType(object, JS_ARRAY_TYPE, temp);
-  __ j(not_equal, &slow_case);
-  __ testb(FieldOperand(temp, Map::kBitFieldOffset),
-           Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
-  __ j(not_zero, &slow_case);
-
-  // Check the object's elements are in fast case and writable.
-  __ movq(elements, FieldOperand(object, JSObject::kElementsOffset));
-  __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
-                 Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &slow_case);
-
-  // Check that both indices are smis.
-  __ movq(index_1, Operand(rsp, 1 * kPointerSize));
-  __ movq(index_2, Operand(rsp, 0 * kPointerSize));
-  __ JumpIfNotBothSmi(index_1, index_2, &slow_case);
-
-  // Check that both indices are valid.
-  // The JSArray length field is a smi since the array is in fast case mode.
-  __ movq(temp, FieldOperand(object, JSArray::kLengthOffset));
-  __ SmiCompare(temp, index_1);
-  __ j(below_equal, &slow_case);
-  __ SmiCompare(temp, index_2);
-  __ j(below_equal, &slow_case);
-
-  __ SmiToInteger32(index_1, index_1);
-  __ SmiToInteger32(index_2, index_2);
-  // Bring addresses into index1 and index2.
-  __ lea(index_1, FieldOperand(elements, index_1, times_pointer_size,
-                               FixedArray::kHeaderSize));
-  __ lea(index_2, FieldOperand(elements, index_2, times_pointer_size,
-                               FixedArray::kHeaderSize));
-
-  // Swap elements.  Use object and temp as scratch registers.
-  __ movq(object, Operand(index_1, 0));
-  __ movq(temp,   Operand(index_2, 0));
-  __ movq(Operand(index_2, 0), object);
-  __ movq(Operand(index_1, 0), temp);
-
-  Label no_remembered_set;
-  __ CheckPageFlag(elements,
-                   temp,
-                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                   not_zero,
-                   &no_remembered_set,
-                   Label::kNear);
-  // Possible optimization: do a check that both values are Smis
-  // (or them and test against Smi mask.)
-
-  // We are swapping two objects in an array and the incremental marker never
-  // pauses in the middle of scanning a single object.  Therefore the
-  // incremental marker is not disturbed, so we don't need to call the
-  // RecordWrite stub that notifies the incremental marker.
-  __ RememberedSetHelper(elements,
-                         index_1,
-                         temp,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
-  __ RememberedSetHelper(elements,
-                         index_2,
-                         temp,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
-
-  __ bind(&no_remembered_set);
-
-  // We are done. Drop elements from the stack, and return undefined.
-  __ addq(rsp, Immediate(3 * kPointerSize));
-  __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-  __ jmp(&done);
-
-  __ bind(&slow_case);
-  __ CallRuntime(Runtime::kSwapElements, 3);
-
-  __ bind(&done);
-  context()->Plug(rax);
-}
-
-
 void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(2, args->length());
@@ -3830,7 +3841,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
         // but "delete this" is allowed.
         ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
         if (var->IsUnallocated()) {
-          __ push(var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+          __ push(GlobalObjectOperand());
           __ Push(var->name());
           __ Push(Smi::FromInt(kNonStrictMode));
           __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
@@ -4152,7 +4163,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
   if (proxy != NULL && proxy->var()->IsUnallocated()) {
     Comment cmnt(masm_, "Global variable");
     __ Move(rcx, proxy->name());
-    __ movq(rax, proxy->var()->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+    __ movq(rax, GlobalObjectOperand());
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
@@ -4416,7 +4427,8 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
 
 void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
   Scope* declaration_scope = scope()->DeclarationScope();
-  if (declaration_scope->is_global_scope()) {
+  if (declaration_scope->is_global_scope() ||
+      declaration_scope->is_module_scope()) {
     // Contexts nested in the global context have a canonical empty function
     // as their closure, not the anonymous closure containing the global
     // code.  Pass a smi sentinel and let the runtime look up the empty
index 0632ce4..6ba5fb6 100644 (file)
@@ -1741,11 +1741,11 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
 
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address());
+    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
   }
 }
 
-void PatchInlinedSmiCode(Address address) {
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1766,14 +1766,18 @@ void PatchInlinedSmiCode(Address address) {
            address, test_instruction_address, delta);
   }
 
-  // Patch with a short conditional jump. There must be a
-  // short jump-if-carry/not-carry at this position.
+  // Patch with a short conditional jump. Enabling means switching from a short
+  // jump-if-carry/not-carry to jump-if-zero/not-zero; disabling performs the
+  // reverse switch.
   Address jmp_address = test_instruction_address - delta;
-  ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
-         *jmp_address == Assembler::kJcShortOpcode);
-  Condition cc = *jmp_address == Assembler::kJncShortOpcode
-      ? not_zero
-      : zero;
+  ASSERT((check == ENABLE_INLINED_SMI_CHECK)
+         ? (*jmp_address == Assembler::kJncShortOpcode ||
+            *jmp_address == Assembler::kJcShortOpcode)
+         : (*jmp_address == Assembler::kJnzShortOpcode ||
+            *jmp_address == Assembler::kJzShortOpcode));
+  Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
+      ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
+      : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
   *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
 }
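The patch touches a single opcode byte: short Jcc instructions encode as 0x70 | condition, so enabling or disabling the inlined smi check just toggles between the carry-based and zero-based pair. A worked sketch, assuming the standard x86 short-jump encodings:

    #include <cstdint>
    // jc = 0x72, jnc = 0x73  (check disabled: always fall through to the IC)
    // jz = 0x74, jnz = 0x75  (check enabled: inlined smi path live)
    uint8_t PatchByte(uint8_t op, bool enable) {
      if (enable) return op == 0x73 ? 0x75 : 0x74;  // jnc->jnz, jc->jz
      return op == 0x75 ? 0x73 : 0x72;              // jnz->jnc, jz->jc
    }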
 
index dc15213..d1cf338 100644 (file)
@@ -184,13 +184,12 @@ bool LCodeGen::GeneratePrologue() {
 
   // Possibly allocate a local context.
   int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-  if (heap_slots > 0 ||
-      (scope()->is_qml_mode() && scope()->is_global_scope())) {
+  if (heap_slots > 0) {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is still in rdi.
     __ push(rdi);
     if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-      FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
+      FastNewContextStub stub(heap_slots);
       __ CallStub(&stub);
     } else {
       __ CallRuntime(Runtime::kNewFunctionContext, 1);
@@ -2017,8 +2016,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                     RECORD_SAFEPOINT_WITH_REGISTERS,
                     2);
     ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
-    ASSERT(instr->HasDeoptimizationEnvironment());
-    LEnvironment* env = instr->deoptimization_environment();
+    LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
     safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
     // Move result to a register that survives the end of the
     // PushSafepointRegisterScope.
@@ -2225,41 +2223,35 @@ void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
   Register result = ToRegister(instr->result());
 
   int map_count = instr->hydrogen()->types()->length();
-  Handle<String> name = instr->hydrogen()->name();
+  bool need_generic = instr->hydrogen()->need_generic();
 
-  if (map_count == 0) {
-    ASSERT(instr->hydrogen()->need_generic());
-    __ Move(rcx, instr->hydrogen()->name());
-    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    Label done;
-    for (int i = 0; i < map_count - 1; ++i) {
-      Handle<Map> map = instr->hydrogen()->types()->at(i);
+  if (map_count == 0 && !need_generic) {
+    DeoptimizeIf(no_condition, instr->environment());
+    return;
+  }
+  Handle<String> name = instr->hydrogen()->name();
+  Label done;
+  for (int i = 0; i < map_count; ++i) {
+    bool last = (i == map_count - 1);
+    Handle<Map> map = instr->hydrogen()->types()->at(i);
+    __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
+    if (last && !need_generic) {
+      DeoptimizeIf(not_equal, instr->environment());
+      EmitLoadFieldOrConstantFunction(result, object, map, name);
+    } else {
       Label next;
-      __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
       __ j(not_equal, &next, Label::kNear);
       EmitLoadFieldOrConstantFunction(result, object, map, name);
       __ jmp(&done, Label::kNear);
       __ bind(&next);
     }
-    Handle<Map> map = instr->hydrogen()->types()->last();
-    __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map);
-    if (instr->hydrogen()->need_generic()) {
-      Label generic;
-      __ j(not_equal, &generic, Label::kNear);
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-      __ jmp(&done, Label::kNear);
-      __ bind(&generic);
-      __ Move(rcx, instr->hydrogen()->name());
-      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      CallCode(ic, RelocInfo::CODE_TARGET, instr);
-    } else {
-      DeoptimizeIf(not_equal, instr->environment());
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-    }
-    __ bind(&done);
   }
+  if (need_generic) {
+    __ Move(rcx, name);
+    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  }
+  __ bind(&done);
 }
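The rewrite folds the formerly special-cased last map into one loop: every non-final map branches to the next check on mismatch, while the final mismatch either deopts (exhaustive map list) or falls through to the generic LoadIC. The emitted control flow, as comment pseudocode:

    // for i in 0 .. map_count-1:
    //   cmp object->map, types[i]
    //   last map && !need_generic:  jne -> deopt; inline load; fall to done
    //   otherwise:                  jne -> next check; inline load; jmp done
    // if need_generic: generic LoadIC
    // done: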
 
 
@@ -2377,11 +2369,20 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
   Register result = ToRegister(instr->result());
 
+  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
+    // Sign extend key because it could be a 32 bit negative value
+    // and the dehoisted address computation happens in 64 bits.
+    Register key_reg = ToRegister(instr->key());
+    __ movsxlq(key_reg, key_reg);
+  }
+
   // Load the result.
   __ movq(result,
-          BuildFastArrayOperand(instr->elements(), instr->key(),
+          BuildFastArrayOperand(instr->elements(),
+                                instr->key(),
                                 FAST_ELEMENTS,
-                                FixedArray::kHeaderSize - kHeapObjectTag));
+                                FixedArray::kHeaderSize - kHeapObjectTag,
+                                instr->additional_index()));
 
   // Check for the hole value.
   if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2395,19 +2396,30 @@ void LCodeGen::DoLoadKeyedFastDoubleElement(
     LLoadKeyedFastDoubleElement* instr) {
   XMMRegister result(ToDoubleRegister(instr->result()));
 
+  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
+    // Sign extend key because it could be a 32 bit negative value
+    // and the dehoisted address computation happens in 64 bits
+    Register key_reg = ToRegister(instr->key());
+    __ movsxlq(key_reg, key_reg);
+  }
+
   int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
       sizeof(kHoleNanLower32);
   Operand hole_check_operand = BuildFastArrayOperand(
       instr->elements(),
       instr->key(),
       FAST_DOUBLE_ELEMENTS,
-      offset);
+      offset,
+      instr->additional_index());
   __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
   DeoptimizeIf(equal, instr->environment());
 
   Operand double_load_operand = BuildFastArrayOperand(
-      instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
-      FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+      instr->elements(),
+      instr->key(),
+      FAST_DOUBLE_ELEMENTS,
+      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
+      instr->additional_index());
   __ movsd(result, double_load_operand);
 }
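The movsxlq is needed because dehoisting can fold a constant into the key and leave a negative 32-bit value in the register, while the addressing-mode arithmetic is 64-bit; zero extension would turn that negative key into a huge positive offset. A minimal C++ illustration:

    #include <cstdint>
    void SignExtensionExample() {
      int32_t key32 = -1;                            // dehoisted key gone negative
      uint64_t zext = static_cast<uint32_t>(key32);  // zero-extended: 0xFFFFFFFF
      int64_t sext = static_cast<int64_t>(key32);    // movsxlq effect: still -1
      (void)zext; (void)sext;
    }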
 
@@ -2416,7 +2428,8 @@ Operand LCodeGen::BuildFastArrayOperand(
     LOperand* elements_pointer,
     LOperand* key,
     ElementsKind elements_kind,
-    uint32_t offset) {
+    uint32_t offset,
+    uint32_t additional_index) {
   Register elements_pointer_reg = ToRegister(elements_pointer);
   int shift_size = ElementsKindToShiftSize(elements_kind);
   if (key->IsConstantOperand()) {
@@ -2425,11 +2438,14 @@ Operand LCodeGen::BuildFastArrayOperand(
       Abort("array index constant value too big");
     }
     return Operand(elements_pointer_reg,
-                   constant_value * (1 << shift_size) + offset);
+                   ((constant_value + additional_index) << shift_size)
+                       + offset);
   } else {
     ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
-    return Operand(elements_pointer_reg, ToRegister(key),
-                   scale_factor, offset);
+    return Operand(elements_pointer_reg,
+                   ToRegister(key),
+                   scale_factor,
+                   offset + (additional_index << shift_size));
   }
 }
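With additional_index in play, the displacement arithmetic differs between the two forms. A worked example for FAST_ELEMENTS on x64 (shift_size == 3, i.e. times_8; offset = FixedArray::kHeaderSize - kHeapObjectTag):

    // constant key 2, additional_index 1:
    //   Operand(elements, ((2 + 1) << 3) + offset)         -> elements + 24 + offset
    // register key,  additional_index 1:
    //   Operand(elements, key, times_8, offset + (1 << 3)) -> elements + key*8 + offset + 8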
 
@@ -2438,7 +2454,17 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     LLoadKeyedSpecializedArrayElement* instr) {
   ElementsKind elements_kind = instr->elements_kind();
   Operand operand(BuildFastArrayOperand(instr->external_pointer(),
-                                        instr->key(), elements_kind, 0));
+                                        instr->key(),
+                                        elements_kind,
+                                        0,
+                                        instr->additional_index()));
+  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
+    // Sign extend key because it could be a 32 bit negative value
+    // and the dehoisted address computation happens in 64 bits
+    Register key_reg = ToRegister(instr->key());
+    __ movsxlq(key_reg, key_reg);
+  }
+
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
     XMMRegister result(ToDoubleRegister(instr->result()));
     __ movss(result, operand);
@@ -2498,24 +2524,28 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   Register result = ToRegister(instr->result());
 
-  // Check for arguments adapter frame.
-  Label done, adapted;
-  __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
-         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(equal, &adapted, Label::kNear);
-
-  // No arguments adaptor frame.
-  __ movq(result, rbp);
-  __ jmp(&done, Label::kNear);
+  if (instr->hydrogen()->from_inlined()) {
+    __ lea(result, Operand(rsp, -2 * kPointerSize));
+  } else {
+    // Check for arguments adapter frame.
+    Label done, adapted;
+    __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+    __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
+           Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+    __ j(equal, &adapted, Label::kNear);
+
+    // No arguments adaptor frame.
+    __ movq(result, rbp);
+    __ jmp(&done, Label::kNear);
 
-  // Arguments adaptor frame present.
-  __ bind(&adapted);
-  __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+    // Arguments adaptor frame present.
+    __ bind(&adapted);
+    __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
 
-  // Result is the frame pointer for the frame if not adapted and for the real
-  // frame below the adaptor frame if adapted.
-  __ bind(&done);
+    // Result is the frame pointer for the frame if not adapted and for the real
+    // frame below the adaptor frame if adapted.
+    __ bind(&done);
+  }
 }
 
 
@@ -2623,7 +2653,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
 
   // Invoke the function.
   __ bind(&invoke);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
@@ -2641,6 +2671,11 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
 }
 
 
+void LCodeGen::DoDrop(LDrop* instr) {
+  __ Drop(instr->count());
+}
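LDrop exists so DoLeaveInlined (in the lithium changes below) can pop arguments that an inlined call pushed explicitly. Its code generation is a bare stack adjustment; MacroAssembler::Drop is roughly:

    void MacroAssembler::Drop(int count) {
      if (count > 0) addq(rsp, Immediate(count * kPointerSize));
    }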
+
+
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
   __ LoadHeapObject(result, instr->hydrogen()->closure());
@@ -2671,7 +2706,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
 
 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
   Register result = ToRegister(instr->result());
-  __ movq(result, instr->qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+  __ movq(result, GlobalObjectOperand());
 }
 
 
@@ -2685,7 +2720,8 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                  int arity,
                                  LInstruction* instr,
-                                 CallKind call_kind) {
+                                 CallKind call_kind,
+                                 RDIState rdi_state) {
   bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
       function->shared()->formal_parameter_count() == arity;
 
@@ -2693,7 +2729,9 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   RecordPosition(pointers->position());
 
   if (can_invoke_directly) {
-    __ LoadHeapObject(rdi, function);
+    if (rdi_state == RDI_UNINITIALIZED) {
+      __ LoadHeapObject(rdi, function);
+    }
 
     // Change context if needed.
     bool change_context =
@@ -2738,7 +2776,8 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   CallKnownFunction(instr->function(),
                     instr->arity(),
                     instr,
-                    CALL_AS_METHOD);
+                    CALL_AS_METHOD,
+                    RDI_UNINITIALIZED);
 }
 
 
@@ -3175,13 +3214,21 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   ASSERT(ToRegister(instr->function()).is(rdi));
   ASSERT(instr->HasPointerMap());
-  ASSERT(instr->HasDeoptimizationEnvironment());
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
-  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-  ParameterCount count(instr->arity());
-  __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
-  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+  if (instr->known_function().is_null()) {
+    LPointerMap* pointers = instr->pointer_map();
+    RecordPosition(pointers->position());
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(instr->arity());
+    __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
+    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  } else {
+    CallKnownFunction(instr->known_function(),
+                      instr->arity(),
+                      instr,
+                      CALL_AS_METHOD,
+                      RDI_CONTAINS_TARGET);
+  }
 }
 
 
@@ -3235,7 +3282,11 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(rax));
-  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
+  CallKnownFunction(instr->target(),
+                    instr->arity(),
+                    instr,
+                    CALL_AS_FUNCTION,
+                    RDI_UNINITIALIZED);
 }
 
 
@@ -3315,7 +3366,18 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement(
     LStoreKeyedSpecializedArrayElement* instr) {
   ElementsKind elements_kind = instr->elements_kind();
   Operand operand(BuildFastArrayOperand(instr->external_pointer(),
-                                        instr->key(), elements_kind, 0));
+                                        instr->key(),
+                                        elements_kind,
+                                        0,
+                                        instr->additional_index()));
+
+  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
+    // Sign extend key because it could be a 32 bit negative value
+    // and the dehoisted address computation happens in 64 bits
+    Register key_reg = ToRegister(instr->key());
+    __ movsxlq(key_reg, key_reg);
+  }
+
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
     XMMRegister value(ToDoubleRegister(instr->value()));
     __ cvtsd2ss(value, value);
@@ -3385,30 +3447,29 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
   Register elements = ToRegister(instr->object());
   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
 
-  // Do the store.
-  if (instr->key()->IsConstantOperand()) {
-    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
-    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
-    int offset =
-        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
-    __ movq(FieldOperand(elements, offset), value);
-  } else {
-    __ movq(FieldOperand(elements,
-                         key,
-                         times_pointer_size,
-                         FixedArray::kHeaderSize),
-            value);
+  Operand operand =
+      BuildFastArrayOperand(instr->object(),
+                            instr->key(),
+                            FAST_ELEMENTS,
+                            FixedArray::kHeaderSize - kHeapObjectTag,
+                            instr->additional_index());
+
+  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
+    // Sign extend key because it could be a 32 bit negative value
+    // and the dehoisted address computation happens in 64 bits
+    Register key_reg = ToRegister(instr->key());
+    __ movsxlq(key_reg, key_reg);
   }
 
+  __ movq(operand, value);
+
   if (instr->hydrogen()->NeedsWriteBarrier()) {
+    ASSERT(!instr->key()->IsConstantOperand());
     HType type = instr->hydrogen()->value()->type();
     SmiCheck check_needed =
         type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
-    __ lea(key, FieldOperand(elements,
-                             key,
-                             times_pointer_size,
-                             FixedArray::kHeaderSize));
+    __ lea(key, operand);
     __ RecordWrite(elements,
                    key,
                    value,
@@ -3422,19 +3483,34 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
 void LCodeGen::DoStoreKeyedFastDoubleElement(
     LStoreKeyedFastDoubleElement* instr) {
   XMMRegister value = ToDoubleRegister(instr->value());
-  Label have_value;
 
-  __ ucomisd(value, value);
-  __ j(parity_odd, &have_value);  // NaN.
+  if (instr->NeedsCanonicalization()) {
+    Label have_value;
+
+    __ ucomisd(value, value);
+    __ j(parity_odd, &have_value);  // NaN.
 
-  __ Set(kScratchRegister, BitCast<uint64_t>(
-      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
-  __ movq(value, kScratchRegister);
+    __ Set(kScratchRegister, BitCast<uint64_t>(
+        FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+    __ movq(value, kScratchRegister);
+
+    __ bind(&have_value);
+  }
 
-  __ bind(&have_value);
   Operand double_store_operand = BuildFastArrayOperand(
-      instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
-      FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+      instr->elements(),
+      instr->key(),
+      FAST_DOUBLE_ELEMENTS,
+      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
+      instr->additional_index());
+
+  if (instr->hydrogen()->IsDehoisted() && !instr->key()->IsConstantOperand()) {
+    // Sign extend key because it could be a 32 bit negative value
+    // and the dehoisted address computation happens in 64 bits
+    Register key_reg = ToRegister(instr->key());
+    __ movsxlq(key_reg, key_reg);
+  }
+
   __ movsd(double_store_operand, value);
 }
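Canonicalization matters because fast double arrays reserve one specific NaN bit pattern to encode the hole, so an arbitrary NaN must not be stored verbatim; ucomisd of a value against itself sets the parity flag exactly for NaN. The same test in portable C++ (the real canonical pattern comes from FixedDoubleArray::canonical_not_the_hole_nan_as_double()):

    #include <limits>
    double CanonicalizeForDoubleArray(double v) {
      // v != v holds only for NaN, mirroring the parity_odd branch above.
      return (v != v) ? std::numeric_limits<double>::quiet_NaN() : v;
    }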
 
@@ -4268,9 +4344,10 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
         __ movq(FieldOperand(result, total_offset), rcx);
       }
     } else if (elements->IsFixedArray()) {
+      Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
       for (int i = 0; i < elements_length; i++) {
         int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
-        Handle<Object> value = JSObject::GetElement(object, i);
+        Handle<Object> value(fast_elements->get(i));
         if (value->IsJSObject()) {
           Handle<JSObject> value_object = Handle<JSObject>::cast(value);
           __ lea(rcx, Operand(result, *offset));
@@ -4294,6 +4371,23 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
 
 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
   int size = instr->hydrogen()->total_size();
+  ElementsKind boilerplate_elements_kind =
+      instr->hydrogen()->boilerplate()->GetElementsKind();
+
+  // Deopt if the literal boilerplate ElementsKind is of a type different from
+  // the expected one. The check isn't necessary if the boilerplate has already
+  // been converted to FAST_ELEMENTS.
+  if (boilerplate_elements_kind != FAST_ELEMENTS) {
+    __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
+    __ movq(rcx, FieldOperand(rbx, HeapObject::kMapOffset));
+    // Load the map's "bit field 2".
+    __ movb(rcx, FieldOperand(rcx, Map::kBitField2Offset));
+    // Retrieve elements_kind from bit field 2.
+    __ and_(rcx, Immediate(Map::kElementsKindMask));
+    __ cmpb(rcx, Immediate(boilerplate_elements_kind <<
+                           Map::kElementsKindShift));
+    DeoptimizeIf(not_equal, instr->environment());
+  }
 
   // Allocate all objects that are part of the literal in one big
   // allocation. This avoids multiple limit checks.
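The guard loads the map's bit field 2 and compares its embedded elements kind against the boilerplate's. The same extraction as a plain C++ sketch, with the mask and shift names taken from the Map layout:

    bool SameElementsKind(uint8_t bit_field2, ElementsKind expected) {
      return (bit_field2 & Map::kElementsKindMask) ==
             (expected << Map::kElementsKindShift);
    }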
@@ -4592,7 +4686,7 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
   LOperand* key = instr->key();
   EmitPushTaggedOperand(obj);
   EmitPushTaggedOperand(key);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   // Create safepoint generator that will also ensure enough space in the
@@ -4610,7 +4704,7 @@ void LCodeGen::DoIn(LIn* instr) {
   LOperand* key = instr->key();
   EmitPushTaggedOperand(key);
   EmitPushTaggedOperand(obj);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
+  ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
index f5045b6..73e1a9b 100644 (file)
@@ -196,12 +196,18 @@ class LCodeGen BASE_EMBEDDED {
                                int argc,
                                LInstruction* instr);
 
+  enum RDIState {
+    RDI_UNINITIALIZED,
+    RDI_CONTAINS_TARGET
+  };
+
   // Generate a direct call to a known function.  Expects the function
   // to be in rdi.
   void CallKnownFunction(Handle<JSFunction> function,
                          int arity,
                          LInstruction* instr,
-                         CallKind call_kind);
+                         CallKind call_kind,
+                         RDIState rdi_state);
 
 
   void RecordSafepointWithLazyDeopt(LInstruction* instr,
@@ -225,7 +231,8 @@ class LCodeGen BASE_EMBEDDED {
       LOperand* elements_pointer,
       LOperand* key,
       ElementsKind elements_kind,
-      uint32_t offset);
+      uint32_t offset,
+      uint32_t additional_index = 0);
 
   // Specific math operations - used from DoUnaryMathOperation.
   void EmitIntegerMathAbs(LUnaryMathOperation* instr);
index 95c3a8e..3ba0cae 100644 (file)
@@ -110,22 +110,17 @@ void LInstruction::PrintTo(StringStream* stream) {
 }
 
 
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+void LInstruction::PrintDataTo(StringStream* stream) {
   stream->Add("= ");
-  for (int i = 0; i < inputs_.length(); i++) {
+  for (int i = 0; i < InputCount(); i++) {
     if (i > 0) stream->Add(" ");
-    inputs_[i]->PrintTo(stream);
+    InputAt(i)->PrintTo(stream);
   }
 }
 
 
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
-  for (int i = 0; i < results_.length(); i++) {
-    if (i > 0) stream->Add(" ");
-    results_[i]->PrintTo(stream);
-  }
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
 }
 
 
@@ -727,22 +722,6 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
 }
 
 
-LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
-    LInstruction* instr, int ast_id) {
-  ASSERT(instruction_pending_deoptimization_environment_ == NULL);
-  ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
-  instruction_pending_deoptimization_environment_ = instr;
-  pending_deoptimization_ast_id_ = ast_id;
-  return instr;
-}
-
-
-void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
-  instruction_pending_deoptimization_environment_ = NULL;
-  pending_deoptimization_ast_id_ = AstNode::kNoNumber;
-}
-
-
 LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
                                         HInstruction* hinstr,
                                         CanDeoptimize can_deoptimize) {
@@ -755,8 +734,10 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
   if (hinstr->HasObservableSideEffects()) {
     ASSERT(hinstr->next()->IsSimulate());
     HSimulate* sim = HSimulate::cast(hinstr->next());
-    instr = SetInstructionPendingDeoptimizationEnvironment(
-        instr, sim->ast_id());
+    ASSERT(instruction_pending_deoptimization_environment_ == NULL);
+    ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+    instruction_pending_deoptimization_environment_ = instr;
+    pending_deoptimization_ast_id_ = sim->ast_id();
   }
 
   // If instruction does not have side-effects lazy deoptimization
@@ -774,12 +755,6 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
 }
 
 
-LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
-  instr->MarkAsSaveDoubles();
-  return instr;
-}
-
-
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
   ASSERT(!instr->HasPointerMap());
   instr->set_pointer_map(new(zone()) LPointerMap(position_));
@@ -1144,7 +1119,7 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
 
 
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
-  return DefineAsRegister(new(zone()) LGlobalObject(instr->qml_global()));
+  return DefineAsRegister(new(zone()) LGlobalObject);
 }
 
 
@@ -1214,7 +1189,7 @@ LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
 
 LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(instr->qml_global()), rax), instr);
+  return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, rax), instr);
 }
 
 
@@ -1285,6 +1260,7 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
 LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
   ASSERT(instr->value()->representation().IsInteger32());
   ASSERT(instr->representation().IsInteger32());
+  if (instr->HasNoUses()) return NULL;
   LOperand* input = UseRegisterAtStart(instr->value());
   LBitNotI* result = new(zone()) LBitNotI(input);
   return DefineSameAsFirst(result);
@@ -1309,6 +1285,12 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
 }
 
 
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
 LInstruction* LChunkBuilder::DoMod(HMod* instr) {
   if (instr->representation().IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
@@ -2241,9 +2223,12 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
   if (pending_deoptimization_ast_id_ == instr->ast_id()) {
     LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
     LInstruction* result = AssignEnvironment(lazy_bailout);
+    // Store the lazy deopt environment with the instruction if needed. Right
+    // now it is only used for LInstanceOfKnownGlobal.
     instruction_pending_deoptimization_environment_->
-        set_deoptimization_environment(result->environment());
-    ClearInstructionPendingDeoptimizationEnvironment();
+        SetDeferredLazyDeoptimizationEnvironment(result->environment());
+    instruction_pending_deoptimization_environment_ = NULL;
+    pending_deoptimization_ast_id_ = AstNode::kNoNumber;
     return result;
   }
 
@@ -2270,8 +2255,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
                                                undefined,
                                                instr->call_kind(),
                                                instr->is_construct());
-  if (instr->arguments() != NULL) {
-    inner->Bind(instr->arguments(), graph()->GetArgumentsObject());
+  if (instr->arguments_var() != NULL) {
+    inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
   }
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
@@ -2280,10 +2265,21 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
 
 
 LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  LInstruction* pop = NULL;
+
+  HEnvironment* env = current_block_->last_environment();
+
+  if (instr->arguments_pushed()) {
+    int argument_count = env->arguments_environment()->parameter_count();
+    pop = new(zone()) LDrop(argument_count);
+    argument_count_ -= argument_count;
+  }
+
   HEnvironment* outer = current_block_->last_environment()->
       DiscardInlined(false);
   current_block_->UpdateEnvironment(outer);
-  return NULL;
+
+  return pop;
 }
 
 
index 390eb49..642a0a0 100644 (file)
@@ -179,7 +179,8 @@ class LCodeGen;
   V(CheckMapValue)                              \
   V(LoadFieldByIndex)                           \
   V(DateField)                                  \
-  V(WrapReceiver)
+  V(WrapReceiver)                               \
+  V(Drop)
 
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
@@ -203,16 +204,15 @@ class LInstruction: public ZoneObject {
   LInstruction()
       :  environment_(NULL),
          hydrogen_value_(NULL),
-         is_call_(false),
-         is_save_doubles_(false) { }
+         is_call_(false) { }
 
   virtual ~LInstruction() { }
 
   virtual void CompileToNative(LCodeGen* generator) = 0;
   virtual const char* Mnemonic() const = 0;
   virtual void PrintTo(StringStream* stream);
-  virtual void PrintDataTo(StringStream* stream) = 0;
-  virtual void PrintOutputOperandTo(StringStream* stream) = 0;
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
 
   enum Opcode {
     // Declare a unique enum value for each instruction.
@@ -247,22 +247,12 @@ class LInstruction: public ZoneObject {
   void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
   HValue* hydrogen_value() const { return hydrogen_value_; }
 
-  void set_deoptimization_environment(LEnvironment* env) {
-    deoptimization_environment_.set(env);
-  }
-  LEnvironment* deoptimization_environment() const {
-    return deoptimization_environment_.get();
-  }
-  bool HasDeoptimizationEnvironment() const {
-    return deoptimization_environment_.is_set();
-  }
-
   void MarkAsCall() { is_call_ = true; }
-  void MarkAsSaveDoubles() { is_save_doubles_ = true; }
+
+  virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
 
   // Interface to the register allocator and iterators.
   bool IsMarkedAsCall() const { return is_call_; }
-  bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
 
   virtual bool HasResult() const = 0;
   virtual LOperand* result() = 0;
@@ -283,9 +273,7 @@ class LInstruction: public ZoneObject {
   LEnvironment* environment_;
   SetOncePointer<LPointerMap> pointer_map_;
   HValue* hydrogen_value_;
-  SetOncePointer<LEnvironment> deoptimization_environment_;
   bool is_call_;
-  bool is_save_doubles_;
 };
 
 
@@ -307,9 +295,6 @@ class LTemplateInstruction: public LInstruction {
   int TempCount() { return T; }
   LOperand* TempAt(int i) { return temps_[i]; }
 
-  virtual void PrintDataTo(StringStream* stream);
-  virtual void PrintOutputOperandTo(StringStream* stream);
-
  protected:
   EmbeddedContainer<LOperand*, R> results_;
   EmbeddedContainer<LOperand*, I> inputs_;
@@ -535,9 +520,8 @@ class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
 
 class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
  public:
-  LArgumentsElements() { }
-
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
 };
 
 
@@ -831,6 +815,15 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
   DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
 
   Handle<JSFunction> function() const { return hydrogen()->function(); }
+  LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
+    return lazy_deopt_env_;
+  }
+  virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {
+    lazy_deopt_env_ = env;
+  }
+
+ private:
+  LEnvironment* lazy_deopt_env_;
 };
 
 
@@ -1206,6 +1199,7 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
 
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
@@ -1222,13 +1216,13 @@ class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
 
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
 class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
  public:
-  LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
-                                    LOperand* key) {
+  LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
     inputs_[0] = external_pointer;
     inputs_[1] = key;
   }
@@ -1242,6 +1236,7 @@ class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
   ElementsKind elements_kind() const {
     return hydrogen()->elements_kind();
   }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
@@ -1358,6 +1353,19 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
 };
 
 
+class LDrop: public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LDrop(int count) : count_(count) { }
+
+  int count() const { return count_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+  int count_;
+};
+
+
 class LThisFunction: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
@@ -1392,13 +1400,7 @@ class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
 
 class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
  public:
-  explicit LGlobalObject(bool qml_global) : qml_global_(qml_global) {}
-
   DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-
-  bool qml_global() { return qml_global_; }
- private:
-  bool qml_global_;
 };
 
 
@@ -1440,6 +1442,7 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
   virtual void PrintDataTo(StringStream* stream);
 
   int arity() const { return hydrogen()->argument_count() - 1; }
+  Handle<JSFunction> known_function() { return hydrogen()->known_function(); }
 };
 
 
@@ -1491,16 +1494,10 @@ class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
   DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
 
-  explicit LCallGlobal(bool qml_global) : qml_global_(qml_global) {}
-
   virtual void PrintDataTo(StringStream* stream);
 
   Handle<String> name() const {return hydrogen()->name(); }
   int arity() const { return hydrogen()->argument_count() - 1; }
-
-  bool qml_global() { return qml_global_; }
- private:
-  bool qml_global_;
 };
 
 
@@ -1697,6 +1694,7 @@ class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
   LOperand* object() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
@@ -1719,6 +1717,9 @@ class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
+
+  bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
@@ -1742,6 +1743,7 @@ class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
   ElementsKind elements_kind() const {
     return hydrogen()->elements_kind();
   }
+  uint32_t additional_index() const { return hydrogen()->index_offset(); }
 };
 
 
@@ -2347,11 +2349,6 @@ class LChunkBuilder BASE_EMBEDDED {
       LInstruction* instr,
       HInstruction* hinstr,
       CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-  LInstruction* MarkAsSaveDoubles(LInstruction* instr);
-
-  LInstruction* SetInstructionPendingDeoptimizationEnvironment(
-      LInstruction* instr, int ast_id);
-  void ClearInstructionPendingDeoptimizationEnvironment();
 
   LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
                                   int* argument_index_accumulator);
index 12e653c..3d380a2 100644 (file)
@@ -150,6 +150,20 @@ int MacroAssembler::LoadAddressSize(ExternalReference source) {
 }
 
 
+void MacroAssembler::PushAddress(ExternalReference source) {
+  int64_t address = reinterpret_cast<int64_t>(source.address());
+  if (is_int32(address) && !Serializer::enabled()) {
+    if (emit_debug_code()) {
+      movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    }
+    push(Immediate(static_cast<int32_t>(address)));
+    return;
+  }
+  LoadAddress(kScratchRegister, source);
+  push(kScratchRegister);
+}
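The fast path relies on push imm32 sign-extending its operand to 64 bits, so it is only correct when the address survives an int32 round-trip, and it is skipped under the serializer because snapshot code must keep external references relocatable. The guard in isolation:

    #include <cstdint>
    bool FitsPushImm32(int64_t address) {
      // push imm32 sign-extends, so the address must round-trip through int32.
      return address == static_cast<int64_t>(static_cast<int32_t>(address));
    }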
+
+
 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
   ASSERT(root_array_available_);
   movq(destination, Operand(kRootRegister,
@@ -4174,7 +4188,7 @@ bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
-      masm_(Isolate::Current(), address, size + Assembler::kGap) {
+      masm_(NULL, address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
index f7b36c5..66587d5 100644 (file)
@@ -127,6 +127,8 @@ class MacroAssembler: public Assembler {
   // Returns the size of the code generated by LoadAddress.
   // Used by CallSize(ExternalReference) to find the size of a call.
   int LoadAddressSize(ExternalReference source);
+  // Pushes the address of the external reference onto the stack.
+  void PushAddress(ExternalReference source);
 
   // Operations on roots in the root-array.
   void LoadRoot(Register destination, Heap::RootListIndex index);
@@ -1443,11 +1445,6 @@ inline Operand GlobalObjectOperand() {
 }
 
 
-static inline Operand QmlGlobalObjectOperand() {
-  return ContextOperand(rsi, Context::QML_GLOBAL_INDEX);
-}
-
-
 // Provides access to exit frame stack space (not GCed).
 inline Operand StackSpaceOperand(int index) {
 #ifdef _WIN64
index 9dfcf7a..5721e9b 100644 (file)
@@ -379,6 +379,7 @@ static void PushInterceptorArguments(MacroAssembler* masm,
   __ push(receiver);
   __ push(holder);
   __ push(FieldOperand(kScratchRegister, InterceptorInfo::kDataOffset));
+  __ PushAddress(ExternalReference::isolate_address());
 }
 
 
@@ -393,7 +394,7 @@ static void CompileCallLoadPropertyWithInterceptor(
   ExternalReference ref =
       ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
                         masm->isolate());
-  __ Set(rax, 5);
+  __ Set(rax, 6);
   __ LoadAddress(rbx, ref);
 
   CEntryStub stub(1);
@@ -402,7 +403,7 @@ static void CompileCallLoadPropertyWithInterceptor(
 
 
 // Number of pointers to be reserved on stack for fast API call.
-static const int kFastApiCallArguments = 3;
+static const int kFastApiCallArguments = 4;
 
 
 // Reserves space for the extra arguments to API function in the
@@ -452,10 +453,11 @@ static void GenerateFastApiCall(MacroAssembler* masm,
   //  -- rsp[16]             : api function
   //                           (first fast api call extra argument)
   //  -- rsp[24]             : api call data
-  //  -- rsp[32]             : last argument
+  //  -- rsp[32]             : isolate
+  //  -- rsp[40]             : last argument
   //  -- ...
-  //  -- rsp[(argc + 3) * 8] : first argument
-  //  -- rsp[(argc + 4) * 8] : receiver
+  //  -- rsp[(argc + 4) * 8] : first argument
+  //  -- rsp[(argc + 5) * 8] : receiver
   // -----------------------------------
   // Get the function and setup the context.
   Handle<JSFunction> function = optimization.constant_function();
@@ -473,9 +475,11 @@ static void GenerateFastApiCall(MacroAssembler* masm,
   } else {
     __ Move(Operand(rsp, 3 * kPointerSize), call_data);
   }
+  __ movq(kScratchRegister, ExternalReference::isolate_address());
+  __ movq(Operand(rsp, 4 * kPointerSize), kScratchRegister);
 
   // Prepare arguments.
-  __ lea(rbx, Operand(rsp, 3 * kPointerSize));
+  __ lea(rbx, Operand(rsp, 4 * kPointerSize));
 
 #if defined(__MINGW64__)
   Register arguments_arg = rcx;
@@ -665,7 +669,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
     __ CallExternalReference(
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
                           masm->isolate()),
-        5);
+        6);
 
     // Restore the name_ register.
     __ pop(name_);
@@ -732,8 +736,10 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
                                       Register scratch,
                                       Label* miss_label) {
   // Check that the map of the object hasn't changed.
+  CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
+                                             : REQUIRE_EXACT_MAP;
   __ CheckMap(receiver_reg, Handle<Map>(object->map()),
-              miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+              miss_label, DO_SMI_CHECK, mode);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -1005,6 +1011,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
   } else {
     __ Push(Handle<Object>(callback->data()));
   }
+  __ PushAddress(ExternalReference::isolate_address());  // isolate
   __ push(name_reg);  // name
   // Save a pointer to where we pushed the arguments pointer.
   // This will be passed as the const AccessorInfo& to the C++ callback.
@@ -1025,14 +1032,14 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
   __ movq(name_arg, rsp);
   __ push(scratch2);  // Restore return address.
 
-  // 3 elements array for v8::Arguments::values_ and handler for name.
-  const int kStackSpace = 4;
+  // 4 elements array for v8::Arguments::values_ and handler for name.
+  const int kStackSpace = 5;
 
   // Allocate v8::AccessorInfo in non-GCed stack space.
   const int kArgStackSpace = 1;
 
   __ PrepareCallApiFunction(kArgStackSpace);
-  __ lea(rax, Operand(name_arg, 3 * kPointerSize));
+  __ lea(rax, Operand(name_arg, 4 * kPointerSize));
 
   // v8::AccessorInfo::args_.
   __ movq(StackSpaceOperand(0), rax);
@@ -1107,13 +1114,20 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
                                           name, miss);
     ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
 
+    // Preserve the receiver register explicitly whenever it is different
+    // from the holder and is needed in case the interceptor returns without
+    // any result: the CALLBACKS case needs the receiver passed into the C++
+    // callback, and the FIELD case might cause a miss during the prototype
+    // check.
+    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
+    bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
+        (lookup->type() == CALLBACKS || must_perform_prototype_check);
+
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
     {
       FrameScope frame_scope(masm(), StackFrame::INTERNAL);
 
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-        // CALLBACKS case needs a receiver to be passed into C++ callback.
+      if (must_preserve_receiver_reg) {
         __ push(receiver);
       }
       __ push(holder_reg);
@@ -1139,7 +1153,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
       __ bind(&interceptor_failed);
       __ pop(name_reg);
       __ pop(holder_reg);
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      if (must_preserve_receiver_reg) {
         __ pop(receiver);
       }
 
@@ -1148,7 +1162,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
 
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
-    if (*interceptor_holder != lookup->holder()) {
+    if (must_perform_prototype_check) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
                                    Handle<JSObject>(lookup->holder()),
@@ -1182,6 +1196,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
       __ push(holder_reg);
       __ Move(holder_reg, callback);
       __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
+      __ PushAddress(ExternalReference::isolate_address());
       __ push(holder_reg);
       __ push(name_reg);
       __ push(scratch2);  // restore return address
@@ -1189,7 +1204,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
       ExternalReference ref =
           ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
                             isolate());
-      __ TailCallExternalReference(ref, 5, 1);
+      __ TailCallExternalReference(ref, 6, 1);
     }
   } else {  // !compile_followup_inline
     // Call the runtime system to load the interceptor.
@@ -1204,7 +1219,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
 
     ExternalReference ref = ExternalReference(
         IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
-    __ TailCallExternalReference(ref, 5, 1);
+    __ TailCallExternalReference(ref, 6, 1);
   }
 }
 
@@ -1996,7 +2011,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
                   name, depth, &miss);
 
   // Move the return address on top of the stack.
-  __ movq(rax, Operand(rsp, 3 * kPointerSize));
+  __ movq(rax, Operand(rsp, 4 * kPointerSize));
   __ movq(Operand(rsp, 0 * kPointerSize), rax);
 
   GenerateFastApiCall(masm(), optimization, argc);
@@ -3117,6 +3132,32 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
   __ jmp(miss_ic, RelocInfo::CODE_TARGET);
 }
 
+
+static void GenerateSmiKeyCheck(MacroAssembler* masm,
+                                Register key,
+                                Register scratch,
+                                XMMRegister xmm_scratch0,
+                                XMMRegister xmm_scratch1,
+                                Label* fail) {
+  // Check that key is a smi or a heap number containing a smi and branch
+  // if the check fails.
+  Label key_ok;
+  __ JumpIfSmi(key, &key_ok);
+  __ CheckMap(key,
+              masm->isolate()->factory()->heap_number_map(),
+              fail,
+              DONT_DO_SMI_CHECK);
+  __ movsd(xmm_scratch0, FieldOperand(key, HeapNumber::kValueOffset));
+  __ cvttsd2si(scratch, xmm_scratch0);
+  __ cvtlsi2sd(xmm_scratch1, scratch);
+  __ ucomisd(xmm_scratch1, xmm_scratch0);
+  __ j(not_equal, fail);
+  __ j(parity_even, fail);  // NaN.
+  __ Integer32ToSmi(key, scratch);
+  __ bind(&key_ok);
+}
+
+
 void KeyedLoadStubCompiler::GenerateLoadExternalArray(
     MacroAssembler* masm,
     ElementsKind elements_kind) {
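
The GenerateSmiKeyCheck helper added above accepts a heap-number key only when truncating it to an int32 and converting back reproduces the original value; the ucomisd plus parity_even jump also rejects NaN, which compares unordered. A rough scalar equivalent (a sketch, helper name ours; note the machine instruction's well-defined 0x80000000 result for bad inputs is undefined behavior in portable C++):

  #include <stdint.h>

  static bool KeyToSmi(double key, int32_t* out) {
    int32_t truncated = static_cast<int32_t>(key);            // cvttsd2si
    if (static_cast<double>(truncated) != key) return false;  // also catches NaN
    *out = truncated;                                         // Integer32ToSmi
    return true;
  }
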
@@ -3130,8 +3171,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(rax, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
 
   // Check that the index is in range.
   __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
@@ -3265,8 +3306,8 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(rcx, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
 
   // Check that the index is in range.
   __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
@@ -3367,30 +3408,28 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     } else {
       // Perform float-to-int conversion with truncation (round-to-zero)
       // behavior.
+      // Fast path: use machine instruction to convert to int64. If that
+      // fails (out-of-range), go into the runtime.
+      __ cvttsd2siq(r8, xmm0);
+      __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
+      __ cmpq(r8, kScratchRegister);
+      __ j(equal, &slow);
 
-      // Convert to int32 and store the low byte/word.
-      // If the value is NaN or +/-infinity, the result is 0x80000000,
-      // which is automatically zero when taken mod 2^n, n < 32.
       // rdx: value (converted to an untagged integer)
       // rdi: untagged index
       // rbx: base pointer of external storage
       switch (elements_kind) {
         case EXTERNAL_BYTE_ELEMENTS:
         case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-          __ cvttsd2si(rdx, xmm0);
-          __ movb(Operand(rbx, rdi, times_1, 0), rdx);
+          __ movb(Operand(rbx, rdi, times_1, 0), r8);
           break;
         case EXTERNAL_SHORT_ELEMENTS:
         case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-          __ cvttsd2si(rdx, xmm0);
-          __ movw(Operand(rbx, rdi, times_2, 0), rdx);
+          __ movw(Operand(rbx, rdi, times_2, 0), r8);
           break;
         case EXTERNAL_INT_ELEMENTS:
         case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-          // Convert to int64, so that NaN and infinities become
-          // 0x8000000000000000, which is zero mod 2^32.
-          __ cvttsd2siq(rdx, xmm0);
-          __ movl(Operand(rbx, rdi, times_4, 0), rdx);
+          __ movl(Operand(rbx, rdi, times_4, 0), r8);
           break;
         case EXTERNAL_PIXEL_ELEMENTS:
         case EXTERNAL_FLOAT_ELEMENTS:
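
The rewritten store path above leans on the cvttsd2siq failure convention: NaN, infinities and out-of-range doubles all convert to 0x8000000000000000, and the stub compares against that sentinel to bail into the runtime instead of silently storing a wrapped value. A scalar sketch (names ours) of the bail-out test:

  #include <stdint.h>
  #include <cmath>

  static const int64_t kConversionFailed = INT64_MIN;  // 0x8000000000000000

  // Returns false when the stub would jump to the slow path. The one honest
  // INT64_MIN input also hits the sentinel and is conservatively rejected.
  static bool FastTruncate(double value, int64_t* out) {
    int64_t result;
    if (std::isnan(value) || value >= 9223372036854775808.0 ||
        value < -9223372036854775808.0) {
      result = kConversionFailed;
    } else {
      result = static_cast<int64_t>(value);
    }
    *out = result;
    return result != kConversionFailed;
  }
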
@@ -3447,8 +3486,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(rax, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
 
   // Get the elements array.
   __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
@@ -3489,8 +3528,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(rax, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
 
   // Get the elements array.
   __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
@@ -3545,8 +3584,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(rcx, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
 
   if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
     __ JumpIfNotSmi(rax, &transition_elements_kind);
@@ -3687,8 +3726,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(rcx, &miss_force_generic);
+  // Check that the key is a smi or a heap number convertible to a smi.
+  GenerateSmiKeyCheck(masm, rcx, rbx, xmm0, xmm1, &miss_force_generic);
 
   // Get the elements array.
   __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
@@ -3770,6 +3809,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
 
     // Increment the length of the array.
     __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
+    // Reload the elements array: the grow path above allocated a fresh
+    // backing store, so rdi is stale when we jump back to finish_store.
+    __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
     __ jmp(&finish_store);
 
     __ bind(&check_capacity);
index b1900f9..0b342ff 100644
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -116,6 +116,8 @@ static v8::Handle<v8::Object> x_holder;
 
 static v8::Handle<Value> XGetter(Local<String> name, const AccessorInfo& info) {
   ApiTestFuzzer::Fuzz();
+  v8::Isolate* isolate = v8::Isolate::GetCurrent();
+  CHECK_EQ(isolate, info.GetIsolate());
   CHECK_EQ(x_receiver, info.This());
   CHECK_EQ(x_holder, info.Holder());
   return v8_num(x_register);
@@ -125,6 +127,8 @@ static v8::Handle<Value> XGetter(Local<String> name, const AccessorInfo& info) {
 static void XSetter(Local<String> name,
                     Local<Value> value,
                     const AccessorInfo& info) {
+  v8::Isolate* isolate = v8::Isolate::GetCurrent();
+  CHECK_EQ(isolate, info.GetIsolate());
   CHECK_EQ(x_holder, info.This());
   CHECK_EQ(x_holder, info.Holder());
   x_register = value->Int32Value();
@@ -236,12 +240,15 @@ THREADED_TEST(HandleScopePop) {
 
 static v8::Handle<Value> CheckAccessorArgsCorrect(Local<String> name,
                                                   const AccessorInfo& info) {
+  CHECK(info.GetIsolate() == v8::Isolate::GetCurrent());
   CHECK(info.This() == info.Holder());
   CHECK(info.Data()->Equals(v8::String::New("data")));
   ApiTestFuzzer::Fuzz();
+  CHECK(info.GetIsolate() == v8::Isolate::GetCurrent());
   CHECK(info.This() == info.Holder());
   CHECK(info.Data()->Equals(v8::String::New("data")));
   HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+  CHECK(info.GetIsolate() == v8::Isolate::GetCurrent());
   CHECK(info.This() == info.Holder());
   CHECK(info.Data()->Equals(v8::String::New("data")));
   return v8::Integer::New(17);
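
The assertions above exercise the newly added GetIsolate() accessor on v8::AccessorInfo (and, further below, on v8::Arguments). For an embedder the pattern is simply (a sketch against this 3.11-era API):

  // An accessor callback that takes the isolate from its info argument
  // instead of the slower thread-local v8::Isolate::GetCurrent() lookup.
  static v8::Handle<v8::Value> MyGetter(v8::Local<v8::String> name,
                                        const v8::AccessorInfo& info) {
    v8::Isolate* isolate = info.GetIsolate();
    (void)isolate;  // e.g. consult isolate->GetData() for embedder state
    return v8::Integer::New(42);
  }
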
index 769fe7b..e195d14 100644
 using namespace v8::internal;
 
 
+// Exhaust |space|'s linear allocation area and clear its free lists, so the
+// next allocation in this space cannot succeed without help from the GC.
+static inline void SimulateFullSpace(PagedSpace* space) {
+  int old_linear_size = static_cast<int>(space->limit() - space->top());
+  space->Free(space->top(), old_linear_size);
+  space->SetTop(space->limit(), space->limit());
+  space->ResetFreeList();
+  space->ClearStats();
+}
+
+
 static MaybeObject* AllocateAfterFailures() {
   static int attempts = 0;
   if (++attempts < 3) return Failure::RetryAfterGC();
@@ -65,24 +74,12 @@ static MaybeObject* AllocateAfterFailures() {
   CHECK(!heap->CopyJSObject(JSObject::cast(object))->IsFailure());
 
   // Old data space.
-  OldSpace* old_data_space = heap->old_data_space();
-  static const int kOldDataSpaceFillerSize = ByteArray::SizeFor(0);
-  while (old_data_space->Available() > kOldDataSpaceFillerSize) {
-    CHECK(!heap->AllocateByteArray(0, TENURED)->IsFailure());
-  }
+  SimulateFullSpace(heap->old_data_space());
   CHECK(!heap->AllocateRawAsciiString(100, TENURED)->IsFailure());
 
   // Old pointer space.
-  OldSpace* old_pointer_space = heap->old_pointer_space();
-  static const int kOldPointerSpaceFillerLength = 10000;
-  static const int kOldPointerSpaceFillerSize = FixedArray::SizeFor(
-      kOldPointerSpaceFillerLength);
-  while (old_pointer_space->Available() > kOldPointerSpaceFillerSize) {
-    CHECK(!heap->AllocateFixedArray(kOldPointerSpaceFillerLength, TENURED)->
-          IsFailure());
-  }
-  CHECK(!heap->AllocateFixedArray(kOldPointerSpaceFillerLength, TENURED)->
-        IsFailure());
+  SimulateFullSpace(heap->old_pointer_space());
+  CHECK(!heap->AllocateFixedArray(10000, TENURED)->IsFailure());
 
   // Large object space.
   static const int kLargeObjectSpaceFillerLength = 300000;
@@ -97,14 +94,9 @@ static MaybeObject* AllocateAfterFailures() {
         IsFailure());
 
   // Map space.
-  MapSpace* map_space = heap->map_space();
-  static const int kMapSpaceFillerSize = Map::kSize;
-  InstanceType instance_type = JS_OBJECT_TYPE;
+  SimulateFullSpace(heap->map_space());
   int instance_size = JSObject::kHeaderSize;
-  while (map_space->Available() > kMapSpaceFillerSize) {
-    CHECK(!heap->AllocateMap(instance_type, instance_size)->IsFailure());
-  }
-  CHECK(!heap->AllocateMap(instance_type, instance_size)->IsFailure());
+  CHECK(!heap->AllocateMap(JS_OBJECT_TYPE, instance_size)->IsFailure());
 
   // Test that we can allocate in old pointer space and code space.
   CHECK(!heap->AllocateFixedArray(100, TENURED)->IsFailure());
index c013150..8a1e914 100644
@@ -8608,6 +8608,8 @@ static void CheckInterceptorLoadIC(NamedPropertyGetter getter,
 static v8::Handle<Value> InterceptorLoadICGetter(Local<String> name,
                                                  const AccessorInfo& info) {
   ApiTestFuzzer::Fuzz();
+  v8::Isolate* isolate = v8::Isolate::GetCurrent();
+  CHECK_EQ(isolate, info.GetIsolate());
   CHECK_EQ(v8_str("data"), info.Data());
   CHECK_EQ(v8_str("x"), name);
   return v8::Integer::New(42);
@@ -9334,6 +9336,8 @@ static v8::Handle<Value> InterceptorCallICFastApi(Local<String> name,
 static v8::Handle<Value> FastApiCallback_TrivialSignature(
     const v8::Arguments& args) {
   ApiTestFuzzer::Fuzz();
+  v8::Isolate* isolate = v8::Isolate::GetCurrent();
+  CHECK_EQ(isolate, args.GetIsolate());
   CHECK_EQ(args.This(), args.Holder());
   CHECK(args.Data()->Equals(v8_str("method_data")));
   return v8::Integer::New(args[0]->Int32Value() + 1);
@@ -9342,6 +9346,8 @@ static v8::Handle<Value> FastApiCallback_TrivialSignature(
 static v8::Handle<Value> FastApiCallback_SimpleSignature(
     const v8::Arguments& args) {
   ApiTestFuzzer::Fuzz();
+  v8::Isolate* isolate = v8::Isolate::GetCurrent();
+  CHECK_EQ(isolate, args.GetIsolate());
   CHECK_EQ(args.This()->GetPrototype(), args.Holder());
   CHECK(args.Data()->Equals(v8_str("method_data")));
   // Note, we're using HasRealNamedProperty instead of Has to avoid
@@ -10865,13 +10871,18 @@ THREADED_TEST(NestedHandleScopeAndContexts) {
 }
 
 
+static int64_t cast(intptr_t x) { return static_cast<int64_t>(x); }
+
+
 THREADED_TEST(ExternalAllocatedMemory) {
   v8::HandleScope outer;
   v8::Persistent<Context> env(Context::New());
   CHECK(!env.IsEmpty());
-  const int kSize = 1024*1024;
-  CHECK_EQ(v8::V8::AdjustAmountOfExternalAllocatedMemory(kSize), kSize);
-  CHECK_EQ(v8::V8::AdjustAmountOfExternalAllocatedMemory(-kSize), 0);
+  const intptr_t kSize = 1024*1024;
+  CHECK_EQ(cast(v8::V8::AdjustAmountOfExternalAllocatedMemory(kSize)),
+           cast(kSize));
+  CHECK_EQ(cast(v8::V8::AdjustAmountOfExternalAllocatedMemory(-kSize)),
+           cast(0));
 }
 
 
@@ -12365,6 +12376,46 @@ THREADED_TEST(ForceDeleteIC) {
 }
 
 
+TEST(InlinedFunctionAcrossContexts) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::HandleScope outer_scope;
+  v8::Persistent<v8::Context> ctx1 = v8::Context::New();
+  v8::Persistent<v8::Context> ctx2 = v8::Context::New();
+  ctx1->Enter();
+
+  {
+    v8::HandleScope inner_scope;
+    CompileRun("var G = 42; function foo() { return G; }");
+    v8::Local<v8::Value> foo = ctx1->Global()->Get(v8_str("foo"));
+    ctx2->Enter();
+    ctx2->Global()->Set(v8_str("o"), foo);
+    v8::Local<v8::Value> res = CompileRun(
+        "function f() { return o(); }"
+        "for (var i = 0; i < 10; ++i) f();"
+        "%OptimizeFunctionOnNextCall(f);"
+        "f();");
+    CHECK_EQ(42, res->Int32Value());
+    ctx2->Exit();
+    v8::Handle<v8::String> G_property = v8::String::New("G");
+    CHECK(ctx1->Global()->ForceDelete(G_property));
+    ctx2->Enter();
+    ExpectString(
+        "(function() {"
+        "  try {"
+        "    return f();"
+        "  } catch(e) {"
+        "    return e.toString();"
+        "  }"
+        " })()",
+        "ReferenceError: G is not defined");
+    ctx2->Exit();
+    ctx1->Exit();
+    ctx1.Dispose();
+  }
+  ctx2.Dispose();
+}
+
+
 v8::Persistent<Context> calling_context0;
 v8::Persistent<Context> calling_context1;
 v8::Persistent<Context> calling_context2;
@@ -12430,19 +12481,16 @@ THREADED_TEST(GetCallingContext) {
 
 
 // Check that a variable declaration with no explicit initialization
-// value does not shadow an existing property in the prototype chain.
-//
-// This is consistent with Firefox and Safari.
-//
-// See http://crbug.com/12548.
+// value does shadow an existing property in the prototype chain.
 THREADED_TEST(InitGlobalVarInProtoChain) {
+  i::FLAG_es52_globals = true;
   v8::HandleScope scope;
   LocalContext context;
   // Introduce a variable in the prototype chain.
   CompileRun("__proto__.x = 42");
-  v8::Handle<v8::Value> result = CompileRun("var x; x");
+  v8::Handle<v8::Value> result = CompileRun("var x = 43; x");
   CHECK(!result->IsUndefined());
-  CHECK_EQ(42, result->Int32Value());
+  CHECK_EQ(43, result->Int32Value());
 }
 
 
@@ -13947,75 +13995,104 @@ TEST(SourceURLInStackTrace) {
 }
 
 
+static void CreateGarbageInOldSpace() {
+  v8::HandleScope scope;
+  i::AlwaysAllocateScope always_allocate;
+  for (int i = 0; i < 1000; i++) {
+    FACTORY->NewFixedArray(1000, i::TENURED);
+  }
+}
+
 // Test that idle notification can be handled and eventually returns true.
-// This just checks the contract of the IdleNotification() function,
-// and does not verify that it does reasonable work.
-THREADED_TEST(IdleNotification) {
+TEST(IdleNotification) {
+  const intptr_t MB = 1024 * 1024;
   v8::HandleScope scope;
   LocalContext env;
-  {
-    // Create garbage in old-space to generate work for idle notification.
-    i::AlwaysAllocateScope always_allocate;
-    for (int i = 0; i < 100; i++) {
-      FACTORY->NewFixedArray(1000, i::TENURED);
-    }
+  intptr_t initial_size = HEAP->SizeOfObjects();
+  CreateGarbageInOldSpace();
+  intptr_t size_with_garbage = HEAP->SizeOfObjects();
+  CHECK_GT(size_with_garbage, initial_size + MB);
+  bool finished = false;
+  for (int i = 0; i < 200 && !finished; i++) {
+    finished = v8::V8::IdleNotification();
   }
-  bool finshed_idle_work = false;
-  for (int i = 0; i < 100 && !finshed_idle_work; i++) {
-    finshed_idle_work = v8::V8::IdleNotification();
-  }
-  CHECK(finshed_idle_work);
+  intptr_t final_size = HEAP->SizeOfObjects();
+  CHECK(finished);
+  CHECK_LT(final_size, initial_size + 1);
 }
 
-// Test that idle notification can be handled and eventually returns true.
-// This just checks the contract of the IdleNotification() function,
-// and does not verify that it does reasonable work.
+
+// Test that idle notification can be handled and eventually collects garbage.
 TEST(IdleNotificationWithSmallHint) {
+  const intptr_t MB = 1024 * 1024;
+  const int IdlePauseInMs = 900;
   v8::HandleScope scope;
   LocalContext env;
-  {
-    // Create garbage in old-space to generate work for idle notification.
-    i::AlwaysAllocateScope always_allocate;
-    for (int i = 0; i < 100; i++) {
-      FACTORY->NewFixedArray(1000, i::TENURED);
-    }
+  intptr_t initial_size = HEAP->SizeOfObjects();
+  CreateGarbageInOldSpace();
+  intptr_t size_with_garbage = HEAP->SizeOfObjects();
+  CHECK_GT(size_with_garbage, initial_size + MB);
+  bool finished = false;
+  for (int i = 0; i < 200 && !finished; i++) {
+    finished = v8::V8::IdleNotification(IdlePauseInMs);
   }
-  intptr_t old_size = HEAP->SizeOfObjects();
-  bool finshed_idle_work = false;
-  bool no_idle_work = v8::V8::IdleNotification(10);
-  for (int i = 0; i < 200 && !finshed_idle_work; i++) {
-    finshed_idle_work = v8::V8::IdleNotification(10);
-  }
-  intptr_t new_size = HEAP->SizeOfObjects();
-  CHECK(finshed_idle_work);
-  CHECK(no_idle_work || new_size < old_size);
+  intptr_t final_size = HEAP->SizeOfObjects();
+  CHECK(finished);
+  CHECK_LT(final_size, initial_size + 1);
 }
 
 
-// This just checks the contract of the IdleNotification() function,
-// and does not verify that it does reasonable work.
+// Test that idle notification can be handled and eventually collects garbage.
 TEST(IdleNotificationWithLargeHint) {
+  const intptr_t MB = 1024 * 1024;
+  const int IdlePauseInMs = 900;
   v8::HandleScope scope;
   LocalContext env;
-  {
-    // Create garbage in old-space to generate work for idle notification.
-    i::AlwaysAllocateScope always_allocate;
-    for (int i = 0; i < 100; i++) {
-      FACTORY->NewFixedArray(1000, i::TENURED);
-    }
-  }
-  intptr_t old_size = HEAP->SizeOfObjects();
-  bool finshed_idle_work = false;
-  bool no_idle_work = v8::V8::IdleNotification(900);
-  for (int i = 0; i < 200 && !finshed_idle_work; i++) {
-    finshed_idle_work = v8::V8::IdleNotification(900);
+  intptr_t initial_size = HEAP->SizeOfObjects();
+  CreateGarbageInOldSpace();
+  intptr_t size_with_garbage = HEAP->SizeOfObjects();
+  CHECK_GT(size_with_garbage, initial_size + MB);
+  bool finished = false;
+  for (int i = 0; i < 200 && !finished; i++) {
+    finished = v8::V8::IdleNotification(IdlePauseInMs);
   }
-  intptr_t new_size = HEAP->SizeOfObjects();
-  CHECK(finshed_idle_work);
-  CHECK(no_idle_work || new_size < old_size);
+  intptr_t final_size = HEAP->SizeOfObjects();
+  CHECK(finished);
+  CHECK_LT(final_size, initial_size + 1);
 }
 
 
+TEST(Regress2107) {
+  const intptr_t MB = 1024 * 1024;
+  const int kShortIdlePauseInMs = 100;
+  const int kLongIdlePauseInMs = 1000;
+  v8::HandleScope scope;
+  LocalContext env;
+  intptr_t initial_size = HEAP->SizeOfObjects();
+  // Send idle notification to start a round of incremental GCs.
+  v8::V8::IdleNotification(kShortIdlePauseInMs);
+  // Emulate 7 page reloads.
+  for (int i = 0; i < 7; i++) {
+    v8::Persistent<v8::Context> ctx = v8::Context::New();
+    ctx->Enter();
+    CreateGarbageInOldSpace();
+    ctx->Exit();
+    ctx.Dispose();
+    v8::V8::ContextDisposedNotification();
+    v8::V8::IdleNotification(kLongIdlePauseInMs);
+  }
+  // Create garbage and check that idle notification still collects it.
+  CreateGarbageInOldSpace();
+  intptr_t size_with_garbage = HEAP->SizeOfObjects();
+  CHECK_GT(size_with_garbage, initial_size + MB);
+  bool finished = false;
+  for (int i = 0; i < 200 && !finished; i++) {
+    finished = v8::V8::IdleNotification(kShortIdlePauseInMs);
+  }
+  intptr_t final_size = HEAP->SizeOfObjects();
+  CHECK_LT(final_size, initial_size + 1);
+}
+
 static uint32_t* stack_limit;
 
 static v8::Handle<Value> GetStackLimitCallback(const v8::Arguments& args) {
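
All four tests above share the same driver shape, which mirrors how an embedder would use the API from its event loop: keep sending idle notifications until V8 reports that no useful idle work remains. As a sketch (helper name ours):

  // Pump idle-time GC work; returns true once IdleNotification() is done.
  static bool PumpIdleWork(int max_rounds, int idle_pause_in_ms) {
    for (int i = 0; i < max_rounds; ++i) {
      if (v8::V8::IdleNotification(idle_pause_in_ms)) return true;
    }
    return false;
  }

  // Usage, mirroring TEST(IdleNotificationWithSmallHint):
  //   CHECK(PumpIdleWork(200, 900));
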
@@ -14319,7 +14396,6 @@ TEST(Regress528) {
   v8::Persistent<Context> context;
   v8::Persistent<Context> other_context;
   int gc_count;
-  bool snapshot_enabled = i::Snapshot::IsEnabled();
 
   // Create a context used to keep the code from aging in the compilation
   // cache.
@@ -14344,10 +14420,10 @@ TEST(Regress528) {
     CompileRun(source_simple);
     other_context->Exit();
     HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
-    if (GetGlobalObjectsCount() == (snapshot_enabled ? 2 : 1)) break;
+    if (GetGlobalObjectsCount() == 1) break;
   }
   CHECK_GE(2, gc_count);
-  CHECK_EQ((snapshot_enabled ? 2 : 1), GetGlobalObjectsCount());
+  CHECK_EQ(1, GetGlobalObjectsCount());
 
   // Eval in a function creates reference from the compilation cache to the
   // global object.
@@ -14366,10 +14442,10 @@ TEST(Regress528) {
     CompileRun(source_eval);
     other_context->Exit();
     HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
-    if (GetGlobalObjectsCount() == (snapshot_enabled ? 2 : 1)) break;
+    if (GetGlobalObjectsCount() == 1) break;
   }
   CHECK_GE(2, gc_count);
-  CHECK_EQ((snapshot_enabled ? 2 : 1), GetGlobalObjectsCount());
+  CHECK_EQ(1, GetGlobalObjectsCount());
 
   // Looking up the line number for an exception creates reference from the
   // compilation cache to the global object.
@@ -14393,10 +14469,10 @@ TEST(Regress528) {
     CompileRun(source_exception);
     other_context->Exit();
     HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
-    if (GetGlobalObjectsCount() == (snapshot_enabled ? 2 : 1)) break;
+    if (GetGlobalObjectsCount() == 1) break;
   }
   CHECK_GE(2, gc_count);
-  CHECK_EQ((snapshot_enabled ? 2 : 1), GetGlobalObjectsCount());
+  CHECK_EQ(1, GetGlobalObjectsCount());
 
   other_context.Dispose();
 }
@@ -16136,6 +16212,30 @@ THREADED_TEST(Regress93759) {
 }
 
 
+THREADED_TEST(Regress125988) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> intercept = FunctionTemplate::New();
+  AddInterceptor(intercept, EmptyInterceptorGetter, EmptyInterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Intercept"), intercept->GetFunction());
+  CompileRun("var a = new Object();"
+             "var b = new Intercept();"
+             "var c = new Object();"
+             "c.__proto__ = b;"
+             "b.__proto__ = a;"
+             "a.x = 23;"
+             "for (var i = 0; i < 3; i++) c.x;");
+  ExpectBoolean("c.hasOwnProperty('x')", false);
+  ExpectInt32("c.x", 23);
+  CompileRun("a.y = 42;"
+             "for (var i = 0; i < 3; i++) c.x;");
+  ExpectBoolean("c.hasOwnProperty('x')", false);
+  ExpectInt32("c.x", 23);
+  ExpectBoolean("c.hasOwnProperty('y')", false);
+  ExpectInt32("c.y", 42);
+}
+
+
 static void TestReceiver(Local<Value> expected_result,
                          Local<Value> expected_receiver,
                          const char* code) {
@@ -16402,3 +16502,94 @@ TEST(PrimaryStubCache) {
   StubCacheHelper(false);
 }
 
+
+static int fatal_error_callback_counter = 0;
+static void CountingErrorCallback(const char* location, const char* message) {
+  printf("CountingErrorCallback(\"%s\", \"%s\")\n", location, message);
+  fatal_error_callback_counter++;
+}
+
+
+TEST(StaticGetters) {
+  v8::HandleScope scope;
+  LocalContext context;
+  v8::Isolate* isolate = v8::Isolate::GetCurrent();
+  i::Handle<i::Object> undefined_value = FACTORY->undefined_value();
+  CHECK(*v8::Utils::OpenHandle(*v8::Undefined()) == *undefined_value);
+  CHECK(*v8::Utils::OpenHandle(*v8::Undefined(isolate)) == *undefined_value);
+  i::Handle<i::Object> null_value = FACTORY->null_value();
+  CHECK(*v8::Utils::OpenHandle(*v8::Null()) == *null_value);
+  CHECK(*v8::Utils::OpenHandle(*v8::Null(isolate)) == *null_value);
+  i::Handle<i::Object> true_value = FACTORY->true_value();
+  CHECK(*v8::Utils::OpenHandle(*v8::True()) == *true_value);
+  CHECK(*v8::Utils::OpenHandle(*v8::True(isolate)) == *true_value);
+  i::Handle<i::Object> false_value = FACTORY->false_value();
+  CHECK(*v8::Utils::OpenHandle(*v8::False()) == *false_value);
+  CHECK(*v8::Utils::OpenHandle(*v8::False(isolate)) == *false_value);
+
+  // Test after-death behavior.
+  CHECK(i::Internals::IsInitialized(isolate));
+  CHECK_EQ(0, fatal_error_callback_counter);
+  v8::V8::SetFatalErrorHandler(CountingErrorCallback);
+  v8::Utils::ReportApiFailure("StaticGetters()", "Kill V8");
+  i::Isolate::Current()->TearDown();
+  CHECK(!i::Internals::IsInitialized(isolate));
+  CHECK_EQ(1, fatal_error_callback_counter);
+  CHECK(v8::Undefined().IsEmpty());
+  CHECK_EQ(2, fatal_error_callback_counter);
+  CHECK(v8::Undefined(isolate).IsEmpty());
+  CHECK_EQ(3, fatal_error_callback_counter);
+  CHECK(v8::Null().IsEmpty());
+  CHECK_EQ(4, fatal_error_callback_counter);
+  CHECK(v8::Null(isolate).IsEmpty());
+  CHECK_EQ(5, fatal_error_callback_counter);
+  CHECK(v8::True().IsEmpty());
+  CHECK_EQ(6, fatal_error_callback_counter);
+  CHECK(v8::True(isolate).IsEmpty());
+  CHECK_EQ(7, fatal_error_callback_counter);
+  CHECK(v8::False().IsEmpty());
+  CHECK_EQ(8, fatal_error_callback_counter);
+  CHECK(v8::False(isolate).IsEmpty());
+  CHECK_EQ(9, fatal_error_callback_counter);
+}
+
+
+TEST(IsolateEmbedderData) {
+  v8::Isolate* isolate = v8::Isolate::GetCurrent();
+  CHECK_EQ(NULL, isolate->GetData());
+  CHECK_EQ(NULL, ISOLATE->GetData());
+  static void* data1 = reinterpret_cast<void*>(0xacce55ed);
+  isolate->SetData(data1);
+  CHECK_EQ(data1, isolate->GetData());
+  CHECK_EQ(data1, ISOLATE->GetData());
+  static void* data2 = reinterpret_cast<void*>(0xdecea5ed);
+  ISOLATE->SetData(data2);
+  CHECK_EQ(data2, isolate->GetData());
+  CHECK_EQ(data2, ISOLATE->GetData());
+  ISOLATE->TearDown();
+  CHECK_EQ(data2, isolate->GetData());
+  CHECK_EQ(data2, ISOLATE->GetData());
+}
+
+
+TEST(StringEmpty) {
+  v8::HandleScope scope;
+  LocalContext context;
+  v8::Isolate* isolate = v8::Isolate::GetCurrent();
+  i::Handle<i::Object> empty_string = FACTORY->empty_symbol();
+  CHECK(*v8::Utils::OpenHandle(*v8::String::Empty()) == *empty_string);
+  CHECK(*v8::Utils::OpenHandle(*v8::String::Empty(isolate)) == *empty_string);
+
+  // Test after-death behavior.
+  CHECK(i::Internals::IsInitialized(isolate));
+  CHECK_EQ(0, fatal_error_callback_counter);
+  v8::V8::SetFatalErrorHandler(CountingErrorCallback);
+  v8::Utils::ReportApiFailure("StringEmpty()", "Kill V8");
+  i::Isolate::Current()->TearDown();
+  CHECK(!i::Internals::IsInitialized(isolate));
+  CHECK_EQ(1, fatal_error_callback_counter);
+  CHECK(v8::String::Empty().IsEmpty());
+  CHECK_EQ(2, fatal_error_callback_counter);
+  CHECK(v8::String::Empty(isolate).IsEmpty());
+  CHECK_EQ(3, fatal_error_callback_counter);
+}
index 107c666..e40f406 100644
@@ -2300,65 +2300,6 @@ TEST(ScriptBreakPointTopLevelCrash) {
   CheckDebuggerUnloaded();
 }
 
-// Test that breakpoint_relocation flag is honored
-TEST(ScriptBreakPointNoRelocation) {
-    i::FLAG_breakpoint_relocation = false;
-
-    v8::HandleScope scope;
-    DebugLocalContext env;
-    env.ExposeDebug();
-
-    // Create a function for checking the function when hitting a break point.
-    frame_function_name = CompileFunction(&env,
-                                          frame_function_name_source,
-                                          "frame_function_name");
-
-    v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount,
-                                     v8::Undefined());
-
-    v8::Local<v8::String> script1 = v8::String::New(
-      "a = 0                      // line 0\n"
-      "                           // line 1\n"
-      "                           // line 2\n"
-      "                           // line 3\n"
-      "function f() {             // line 4\n"
-      "  return 0;                // line 5\n"
-      "}                          // line 6");
-
-    // Set the script break point on the empty line
-    SetScriptBreakPointByNameFromJS("test.html", 2, -1);
-
-    // Compile the script and call the function.
-    v8::ScriptOrigin origin(v8::String::New("test.html"), v8::Integer::New(0));
-    v8::Script::Compile(script1, &origin)->Run();
-    v8::Local<v8::Function> f
-            = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
-    f->Call(env->Global(), 0, NULL);
-
-    // Check that a break point was not hit
-    CHECK_EQ(0, break_point_hit_count);
-
-    v8::Local<v8::String> script2 = v8::String::New(
-      "a = 0                      // line 0\n"
-      "function g() {             // line 1\n"
-      "  return 0;                // line 2\n"
-      "}                          // line 3\n"
-      "function f() {             // line 4\n"
-      "  return 0;                // line 5\n"
-      "}                          // line 6");
-
-    // Compile the script and call the new function
-    v8::Script::Compile(script2, &origin)->Run();
-    v8::Local<v8::Function> g
-            = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("g")));
-    g->Call(env->Global(), 0, NULL);
-
-    // Check that a break point was not hit
-    CHECK_EQ(1, break_point_hit_count);
-
-    v8::Debug::SetDebugEventListener(NULL);
-    CheckDebuggerUnloaded();
-}
 
 // Test that it is possible to remove the last break point for a function
 // inside the break handling of that break point.
@@ -5072,7 +5013,10 @@ static void ThreadedMessageHandler(const v8::Debug::Message& message) {
   if (IsBreakEventMessage(print_buffer)) {
     // Check that we are inside the while loop.
     int source_line = GetSourceLineFromBreakEventMessage(print_buffer);
-    CHECK(8 <= source_line && source_line <= 13);
+    // TODO(2047): This should really be 8 <= source_line <= 13; but we
+    // currently have an off-by-one error when calculating the source
+    // position corresponding to the program counter at the debug break.
+    CHECK(7 <= source_line && source_line <= 13);
     threaded_debugging_barriers.barrier_2.Wait();
   }
 }
index aa733c7..e6bdc9f 100644
@@ -521,6 +521,7 @@ class ExistsInPrototypeContext: public DeclarationContext {
 
 
 TEST(ExistsInPrototype) {
+  i::FLAG_es52_globals = true;
   HandleScope scope;
 
   // Sanity check to make sure that the holder of the interceptor
@@ -535,17 +536,17 @@ TEST(ExistsInPrototype) {
 
   { ExistsInPrototypeContext context;
     context.Check("var x; x",
-                  1,  // get
+                  0,  // get
                   0,
-                  1,  // declaration
-                  EXPECT_EXCEPTION);
+                  0,  // declaration
+                  EXPECT_RESULT, Undefined());
   }
 
   { ExistsInPrototypeContext context;
     context.Check("var x = 0; x",
                   0,
                   0,
-                  1,  // declaration
+                  0,  // declaration
                   EXPECT_RESULT, Number::New(0));
   }
 
@@ -553,7 +554,7 @@ TEST(ExistsInPrototype) {
     context.Check("const x; x",
                   0,
                   0,
-                  1,  // declaration
+                  0,  // declaration
                   EXPECT_RESULT, Undefined());
   }
 
@@ -561,7 +562,7 @@ TEST(ExistsInPrototype) {
     context.Check("const x = 0; x",
                   0,
                   0,
-                  1,  // declaration
+                  0,  // declaration
                   EXPECT_RESULT, Number::New(0));
   }
 }
@@ -583,13 +584,14 @@ class AbsentInPrototypeContext: public DeclarationContext {
 
 
 TEST(AbsentInPrototype) {
+  i::FLAG_es52_globals = true;
   HandleScope scope;
 
   { AbsentInPrototypeContext context;
     context.Check("if (false) { var x = 0; }; x",
                   0,
                   0,
-                  1,  // declaration
+                  0,  // declaration
                   EXPECT_RESULT, Undefined());
   }
 }
index 3594a4f..6ef42c6 100644
@@ -112,21 +112,6 @@ TEST(IsInfinite) {
 }
 
 
-TEST(IsNan) {
-  CHECK(Double(OS::nan_value()).IsNan());
-  uint64_t other_nan = V8_2PART_UINT64_C(0xFFFFFFFF, 00000001);
-  CHECK(Double(other_nan).IsNan());
-  CHECK(!Double(V8_INFINITY).IsNan());
-  CHECK(!Double(-V8_INFINITY).IsNan());
-  CHECK(!Double(0.0).IsNan());
-  CHECK(!Double(-0.0).IsNan());
-  CHECK(!Double(1.0).IsNan());
-  CHECK(!Double(-1.0).IsNan());
-  uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
-  CHECK(!Double(min_double64).IsNan());
-}
-
-
 TEST(Sign) {
   CHECK_EQ(1, Double(1.0).Sign());
   CHECK_EQ(1, Double(V8_INFINITY).Sign());
index b6fc486..cbe8d44 100644
@@ -2,11 +2,14 @@
 //
 // Tests for heap profiler
 
+#include <ctype.h>
+
 #include "v8.h"
 
 #include "cctest.h"
 #include "heap-profiler.h"
 #include "snapshot.h"
+#include "debug.h"
 #include "utils-inl.h"
 #include "../include/v8-profiler.h"
 
@@ -31,10 +34,10 @@ class NamedEntriesDetector {
     CheckEntry(root);
     while (!list.is_empty()) {
       i::HeapEntry* entry = list.RemoveLast();
-      i::Vector<i::HeapGraphEdge> children = entry->children();
+      i::Vector<i::HeapGraphEdge*> children = entry->children();
       for (int i = 0; i < children.length(); ++i) {
-        if (children[i].type() == i::HeapGraphEdge::kShortcut) continue;
-        i::HeapEntry* child = children[i].to();
+        if (children[i]->type() == i::HeapGraphEdge::kShortcut) continue;
+        i::HeapEntry* child = children[i]->to();
         if (!child->painted()) {
           list.Add(child);
           child->paint();
@@ -54,11 +57,9 @@ class NamedEntriesDetector {
 
 static const v8::HeapGraphNode* GetGlobalObject(
     const v8::HeapSnapshot* snapshot) {
-  bool snapshot_enabled = i::Snapshot::IsEnabled();
-
-  CHECK_EQ((snapshot_enabled ? 3 : 2), snapshot->GetRoot()->GetChildrenCount());
+  CHECK_EQ(2, snapshot->GetRoot()->GetChildrenCount());
   const v8::HeapGraphNode* global_obj =
-      snapshot->GetRoot()->GetChild(snapshot_enabled ? 1 : 0)->GetToNode();
+      snapshot->GetRoot()->GetChild(0)->GetToNode();
   CHECK_EQ(0, strncmp("Object", const_cast<i::HeapEntry*>(
       reinterpret_cast<const i::HeapEntry*>(global_obj))->name(), 6));
   return global_obj;
@@ -111,13 +112,13 @@ TEST(HeapSnapshot) {
 
   // Verify, that JS global object of env2 has '..2' properties.
   const v8::HeapGraphNode* a2_node =
-      GetProperty(global_env2, v8::HeapGraphEdge::kShortcut, "a2");
+      GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "a2");
   CHECK_NE(NULL, a2_node);
   CHECK_NE(
-      NULL, GetProperty(global_env2, v8::HeapGraphEdge::kShortcut, "b2_1"));
+      NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "b2_1"));
   CHECK_NE(
-      NULL, GetProperty(global_env2, v8::HeapGraphEdge::kShortcut, "b2_2"));
-  CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::kShortcut, "c2"));
+      NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "b2_2"));
+  CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "c2"));
 
   // Paint all nodes reachable from global object.
   NamedEntriesDetector det;
@@ -139,12 +140,13 @@ TEST(HeapSnapshotObjectSizes) {
   CompileRun(
       "function X(a, b) { this.a = a; this.b = b; }\n"
       "x = new X(new X(), new X());\n"
+      "dummy = new X();\n"
       "(function() { x.a.a = x.b; })();");
   const v8::HeapSnapshot* snapshot =
       v8::HeapProfiler::TakeSnapshot(v8_str("sizes"));
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   const v8::HeapGraphNode* x =
-      GetProperty(global, v8::HeapGraphEdge::kShortcut, "x");
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "x");
   CHECK_NE(NULL, x);
   const v8::HeapGraphNode* x1 =
       GetProperty(x, v8::HeapGraphEdge::kProperty, "a");
@@ -171,7 +173,7 @@ TEST(BoundFunctionInSnapshot) {
       v8::HeapProfiler::TakeSnapshot(v8_str("sizes"));
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   const v8::HeapGraphNode* f =
-      GetProperty(global, v8::HeapGraphEdge::kShortcut, "boundFunction");
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "boundFunction");
   CHECK(f);
   CHECK_EQ(v8::String::New("native_bind"), f->GetName());
   const v8::HeapGraphNode* bindings =
@@ -235,15 +237,15 @@ TEST(HeapSnapshotCodeObjects) {
 
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   const v8::HeapGraphNode* compiled =
-      GetProperty(global, v8::HeapGraphEdge::kShortcut, "compiled");
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "compiled");
   CHECK_NE(NULL, compiled);
   CHECK_EQ(v8::HeapGraphNode::kClosure, compiled->GetType());
   const v8::HeapGraphNode* lazy =
-      GetProperty(global, v8::HeapGraphEdge::kShortcut, "lazy");
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "lazy");
   CHECK_NE(NULL, lazy);
   CHECK_EQ(v8::HeapGraphNode::kClosure, lazy->GetType());
   const v8::HeapGraphNode* anonymous =
-      GetProperty(global, v8::HeapGraphEdge::kShortcut, "anonymous");
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "anonymous");
   CHECK_NE(NULL, anonymous);
   CHECK_EQ(v8::HeapGraphNode::kClosure, anonymous->GetType());
   v8::String::AsciiValue anonymous_name(anonymous->GetName());
@@ -295,9 +297,9 @@ TEST(HeapSnapshotHeapNumbers) {
   const v8::HeapSnapshot* snapshot =
       v8::HeapProfiler::TakeSnapshot(v8_str("numbers"));
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
-  CHECK_EQ(NULL, GetProperty(global, v8::HeapGraphEdge::kShortcut, "a"));
+  CHECK_EQ(NULL, GetProperty(global, v8::HeapGraphEdge::kProperty, "a"));
   const v8::HeapGraphNode* b =
-      GetProperty(global, v8::HeapGraphEdge::kShortcut, "b");
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "b");
   CHECK_NE(NULL, b);
   CHECK_EQ(v8::HeapGraphNode::kHeapNumber, b->GetType());
 }
@@ -315,10 +317,10 @@ TEST(HeapSnapshotSlicedString) {
       v8::HeapProfiler::TakeSnapshot(v8_str("strings"));
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   const v8::HeapGraphNode* parent_string =
-      GetProperty(global, v8::HeapGraphEdge::kShortcut, "parent_string");
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "parent_string");
   CHECK_NE(NULL, parent_string);
   const v8::HeapGraphNode* child_string =
-      GetProperty(global, v8::HeapGraphEdge::kShortcut, "child_string");
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "child_string");
   CHECK_NE(NULL, child_string);
   const v8::HeapGraphNode* parent =
       GetProperty(child_string, v8::HeapGraphEdge::kInternal, "parent");
@@ -386,24 +388,17 @@ TEST(HeapEntryIdsAndArrayShift) {
   const v8::HeapGraphNode* a1 =
       GetProperty(global1, v8::HeapGraphEdge::kProperty, "a");
   CHECK_NE(NULL, a1);
-  const v8::HeapGraphNode* e1 =
-      GetProperty(a1, v8::HeapGraphEdge::kHidden, "1");
-  CHECK_NE(NULL, e1);
   const v8::HeapGraphNode* k1 =
-      GetProperty(e1, v8::HeapGraphEdge::kInternal, "elements");
+      GetProperty(a1, v8::HeapGraphEdge::kInternal, "elements");
   CHECK_NE(NULL, k1);
   const v8::HeapGraphNode* a2 =
       GetProperty(global2, v8::HeapGraphEdge::kProperty, "a");
   CHECK_NE(NULL, a2);
-  const v8::HeapGraphNode* e2 =
-      GetProperty(a2, v8::HeapGraphEdge::kHidden, "1");
-  CHECK_NE(NULL, e2);
   const v8::HeapGraphNode* k2 =
-      GetProperty(e2, v8::HeapGraphEdge::kInternal, "elements");
+      GetProperty(a2, v8::HeapGraphEdge::kInternal, "elements");
   CHECK_NE(NULL, k2);
 
   CHECK_EQ_SNAPSHOT_OBJECT_ID(a1->GetId(), a2->GetId());
-  CHECK_EQ_SNAPSHOT_OBJECT_ID(e1->GetId(), e2->GetId());
   CHECK_EQ_SNAPSHOT_OBJECT_ID(k1->GetId(), k2->GetId());
 }
 
@@ -516,7 +511,7 @@ TEST(HeapEntryDominator) {
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   CHECK_NE(NULL, global);
   const v8::HeapGraphNode* node6 =
-      GetProperty(global, v8::HeapGraphEdge::kShortcut, "node6");
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "node6");
   CHECK_NE(NULL, node6);
   const v8::HeapGraphNode* node5 =
       GetProperty(node6, v8::HeapGraphEdge::kProperty, "a");
@@ -559,9 +554,14 @@ class TestJSONStream : public v8::OutputStream {
     memcpy(chunk.start(), buffer, chars_written);
     return kContinue;
   }
+  virtual WriteResult WriteUint32Chunk(uint32_t* buffer, int chars_written) {
+    ASSERT(false);
+    return kAbort;
+  }
   void WriteTo(i::Vector<char> dest) { buffer_.WriteTo(dest); }
   int eos_signaled() { return eos_signaled_; }
   int size() { return buffer_.size(); }
+
  private:
   i::Collector<char> buffer_;
   int eos_signaled_;
@@ -615,66 +615,50 @@ TEST(HeapSnapshotJSONSerialization) {
       env->Global()->Get(v8_str("parsed"))->ToObject();
   CHECK(parsed_snapshot->Has(v8_str("snapshot")));
   CHECK(parsed_snapshot->Has(v8_str("nodes")));
+  CHECK(parsed_snapshot->Has(v8_str("edges")));
   CHECK(parsed_snapshot->Has(v8_str("strings")));
 
   // Get node and edge "member" offsets.
   v8::Local<v8::Value> meta_analysis_result = CompileRun(
-      "var parsed_meta = parsed.nodes[0];\n"
-      "var children_count_offset ="
-      "    parsed_meta.fields.indexOf('children_count');\n"
-      "var children_offset ="
-      "    parsed_meta.fields.indexOf('children');\n"
-      "var children_meta ="
-      "    parsed_meta.types[children_offset];\n"
-      "var child_fields_count = children_meta.fields.length;\n"
-      "var child_type_offset ="
-      "    children_meta.fields.indexOf('type');\n"
-      "var child_name_offset ="
-      "    children_meta.fields.indexOf('name_or_index');\n"
-      "var child_to_node_offset ="
-      "    children_meta.fields.indexOf('to_node');\n"
+      "var meta = parsed.snapshot.meta;\n"
+      "var edges_index_offset = meta.node_fields.indexOf('edges_index');\n"
+      "var node_fields_count = meta.node_fields.length;\n"
+      "var edge_fields_count = meta.edge_fields.length;\n"
+      "var edge_type_offset = meta.edge_fields.indexOf('type');\n"
+      "var edge_name_offset = meta.edge_fields.indexOf('name_or_index');\n"
+      "var edge_to_node_offset = meta.edge_fields.indexOf('to_node');\n"
       "var property_type ="
-      "    children_meta.types[child_type_offset].indexOf('property');\n"
+      "    meta.edge_types[edge_type_offset].indexOf('property');\n"
       "var shortcut_type ="
-      "    children_meta.types[child_type_offset].indexOf('shortcut');");
+      "    meta.edge_types[edge_type_offset].indexOf('shortcut');\n"
+      "parsed.nodes.concat(0, 0, 0, 0, 0, 0, parsed.edges.length);");
   CHECK(!meta_analysis_result.IsEmpty());
 
   // A helper function for processing encoded nodes.
   CompileRun(
       "function GetChildPosByProperty(pos, prop_name, prop_type) {\n"
       "  var nodes = parsed.nodes;\n"
+      "  var edges = parsed.edges;\n"
       "  var strings = parsed.strings;\n"
-      "  for (var i = 0,\n"
-      "      count = nodes[pos + children_count_offset] * child_fields_count;\n"
-      "      i < count; i += child_fields_count) {\n"
-      "    var child_pos = pos + children_offset + i;\n"
-      "    if (nodes[child_pos + child_type_offset] === prop_type\n"
-      "       && strings[nodes[child_pos + child_name_offset]] === prop_name)\n"
-      "        return nodes[child_pos + child_to_node_offset];\n"
+      "  for (var i = nodes[pos + edges_index_offset],\n"
+      "      count = nodes[pos + node_fields_count + edges_index_offset];\n"
+      "      i < count; i += edge_fields_count) {\n"
+      "    if (edges[i + edge_type_offset] === prop_type\n"
+      "        && strings[edges[i + edge_name_offset]] === prop_name)\n"
+      "      return edges[i + edge_to_node_offset];\n"
       "  }\n"
       "  return null;\n"
       "}\n");
   // Get the string index using the path: <root> -> <global>.b.x.s
-  v8::Local<v8::Value> string_obj_pos_val;
-  if (i::Snapshot::IsEnabled()) {
-      string_obj_pos_val = CompileRun(
-          "GetChildPosByProperty(\n"
-          "  GetChildPosByProperty(\n"
-          "    GetChildPosByProperty("
-          "      parsed.nodes[1 + children_offset + child_to_node_offset + child_fields_count],"
-          "      \"b\",shortcut_type),\n"
-          "    \"x\", property_type),"
-          "  \"s\", property_type)");
-  } else {
-      string_obj_pos_val = CompileRun(
-          "GetChildPosByProperty(\n"
-          "  GetChildPosByProperty(\n"
-          "    GetChildPosByProperty("
-          "      parsed.nodes[1 + children_offset + child_to_node_offset],"
-          "      \"b\",shortcut_type),\n"
-          "    \"x\", property_type),"
-          "  \"s\", property_type)");
-  }
+  v8::Local<v8::Value> string_obj_pos_val = CompileRun(
+      "GetChildPosByProperty(\n"
+      "  GetChildPosByProperty(\n"
+      "    GetChildPosByProperty("
+      "      parsed.edges[parsed.nodes[edges_index_offset]"
+      "                   + edge_to_node_offset],"
+      "      \"b\", property_type),\n"
+      "    \"x\", property_type),"
+      "  \"s\", property_type)");
   CHECK(!string_obj_pos_val.IsEmpty());
   int string_obj_pos =
       static_cast<int>(string_obj_pos_val->ToNumber()->Value());
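
The serializer now emits nodes and edges as two flat, parallel integer arrays instead of nesting each node's children inline: a node row carries an 'edges_index' field pointing at its first row in the edges array, and a node's edge list ends where the next node's begins (which is why the script above appends a synthetic terminator whose edges_index equals edges.length). A C++ sketch of the traversal the GetChildPosByProperty helper performs (types and names ours):

  #include <vector>

  struct Meta {
    int node_fields_count;   // ints per node row
    int edge_fields_count;   // ints per edge row
    int edges_index_offset;  // position of 'edges_index' within a node row
  };

  // Visit the edge rows belonging to the node row starting at |node_pos|.
  static void ForEachEdge(const std::vector<int>& nodes,
                          const std::vector<int>& edges,
                          const Meta& m,
                          int node_pos) {
    int begin = nodes[node_pos + m.edges_index_offset];
    int end = nodes[node_pos + m.node_fields_count + m.edges_index_offset];
    for (int i = begin; i < end; i += m.edge_fields_count) {
      // edges[i + type_offset], edges[i + name_or_index_offset],
      // edges[i + to_node_offset] describe one edge.
    }
  }
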
@@ -705,6 +689,200 @@ TEST(HeapSnapshotJSONSerializationAborting) {
   CHECK_EQ(0, stream.eos_signaled());
 }
 
+namespace {
+
+class TestStatsStream : public v8::OutputStream {
+ public:
+  TestStatsStream()
+    : eos_signaled_(0),
+      updates_written_(0),
+      entries_count_(0),
+      entries_size_(0),
+      intervals_count_(0),
+      first_interval_index_(-1) { }
+  TestStatsStream(const TestStatsStream& stream)
+    : v8::OutputStream(stream),
+      eos_signaled_(stream.eos_signaled_),
+      updates_written_(stream.updates_written_),
+      entries_count_(stream.entries_count_),
+      entries_size_(stream.entries_size_),
+      intervals_count_(stream.intervals_count_),
+      first_interval_index_(stream.first_interval_index_) { }
+  virtual ~TestStatsStream() {}
+  virtual void EndOfStream() { ++eos_signaled_; }
+  virtual WriteResult WriteAsciiChunk(char* buffer, int chars_written) {
+    ASSERT(false);
+    return kAbort;
+  }
+  virtual WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* buffer,
+                                          int updates_written) {
+    ++intervals_count_;
+    ASSERT(updates_written);
+    updates_written_ += updates_written;
+    entries_count_ = 0;
+    if (first_interval_index_ == -1 && updates_written != 0)
+      first_interval_index_ = buffer[0].index;
+    for (int i = 0; i < updates_written; ++i) {
+      entries_count_ += buffer[i].count;
+      entries_size_ += buffer[i].size;
+    }
+
+    return kContinue;
+  }
+  int eos_signaled() { return eos_signaled_; }
+  int updates_written() { return updates_written_; }
+  uint32_t entries_count() const { return entries_count_; }
+  uint32_t entries_size() const { return entries_size_; }
+  int intervals_count() const { return intervals_count_; }
+  int first_interval_index() const { return first_interval_index_; }
+
+ private:
+  int eos_signaled_;
+  int updates_written_;
+  uint32_t entries_count_;
+  uint32_t entries_size_;
+  int intervals_count_;
+  int first_interval_index_;
+};
+
+}  // namespace
+
+static TestStatsStream GetHeapStatsUpdate() {
+  TestStatsStream stream;
+  v8::HeapProfiler::PushHeapObjectsStats(&stream);
+  CHECK_EQ(1, stream.eos_signaled());
+  return stream;
+}
+
+
+TEST(HeapSnapshotObjectsStats) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  v8::HeapProfiler::StartHeapObjectsTracking();
+  // We have to call GC 5 times; otherwise leftover garbage would make the
+  // test flaky.
+  for (int i = 0; i < 5; ++i) {
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+  }
+
+  {
+    // Single chunk of data expected in update. Initial data.
+    TestStatsStream stats_update = GetHeapStatsUpdate();
+    CHECK_EQ(1, stats_update.intervals_count());
+    CHECK_EQ(1, stats_update.updates_written());
+    CHECK_LT(0, stats_update.entries_size());
+    CHECK_EQ(0, stats_update.first_interval_index());
+  }
+
+  // No data expected in update because nothing has happened.
+  CHECK_EQ(0, GetHeapStatsUpdate().updates_written());
+  {
+    v8::HandleScope inner_scope_1;
+    v8_str("string1");
+    {
+      // Single chunk of data with one new entry expected in update.
+      TestStatsStream stats_update = GetHeapStatsUpdate();
+      CHECK_EQ(1, stats_update.intervals_count());
+      CHECK_EQ(1, stats_update.updates_written());
+      CHECK_LT(0, stats_update.entries_size());
+      CHECK_EQ(1, stats_update.entries_count());
+      CHECK_EQ(2, stats_update.first_interval_index());
+    }
+
+    // No data expected in update because nothing happened.
+    CHECK_EQ(0, GetHeapStatsUpdate().updates_written());
+
+    {
+      v8::HandleScope inner_scope_2;
+      v8_str("string2");
+
+      uint32_t entries_size;
+      {
+        v8::HandleScope inner_scope_3;
+        v8_str("string3");
+        v8_str("string4");
+
+        {
+          // Single chunk of data with three new entries expected in update.
+          TestStatsStream stats_update = GetHeapStatsUpdate();
+          CHECK_EQ(1, stats_update.intervals_count());
+          CHECK_EQ(1, stats_update.updates_written());
+          CHECK_LT(0, entries_size = stats_update.entries_size());
+          CHECK_EQ(3, stats_update.entries_count());
+          CHECK_EQ(4, stats_update.first_interval_index());
+        }
+      }
+
+      {
+        // Single chunk of data with two left entries expected in update.
+        TestStatsStream stats_update = GetHeapStatsUpdate();
+        CHECK_EQ(1, stats_update.intervals_count());
+        CHECK_EQ(1, stats_update.updates_written());
+        CHECK_GT(entries_size, stats_update.entries_size());
+        CHECK_EQ(1, stats_update.entries_count());
+        // Two strings from the fourth interval were released.
+        CHECK_EQ(4, stats_update.first_interval_index());
+      }
+    }
+
+    {
+      // Single chunk of data with 0 left entries expected in update.
+      TestStatsStream stats_update = GetHeapStatsUpdate();
+      CHECK_EQ(1, stats_update.intervals_count());
+      CHECK_EQ(1, stats_update.updates_written());
+      CHECK_EQ(0, stats_update.entries_size());
+      CHECK_EQ(0, stats_update.entries_count());
+      // The last string from the fourth interval was released.
+      CHECK_EQ(4, stats_update.first_interval_index());
+    }
+  }
+  {
+    // Single chunk of data with 0 left entries expected in update.
+    TestStatsStream stats_update = GetHeapStatsUpdate();
+    CHECK_EQ(1, stats_update.intervals_count());
+    CHECK_EQ(1, stats_update.updates_written());
+    CHECK_EQ(0, stats_update.entries_size());
+    CHECK_EQ(0, stats_update.entries_count());
+    // The only string from the second interval was released.
+    CHECK_EQ(2, stats_update.first_interval_index());
+  }
+
+  v8::Local<v8::Array> array = v8::Array::New();
+  CHECK_EQ(0, array->Length());
+  // Force array's buffer allocation.
+  array->Set(2, v8_num(7));
+
+  uint32_t entries_size;
+  {
+    // Single chunk of data with 2 entries expected in update.
+    TestStatsStream stats_update = GetHeapStatsUpdate();
+    CHECK_EQ(1, stats_update.intervals_count());
+    CHECK_EQ(1, stats_update.updates_written());
+    CHECK_LT(0, entries_size = stats_update.entries_size());
+    // They are the array and its buffer.
+    CHECK_EQ(2, stats_update.entries_count());
+    CHECK_EQ(8, stats_update.first_interval_index());
+  }
+
+  for (int i = 0; i < 100; ++i)
+    array->Set(i, v8_num(i));
+
+  {
+    // Single chunk of data with 1 entry expected in update.
+    TestStatsStream stats_update = GetHeapStatsUpdate();
+    CHECK_EQ(1, stats_update.intervals_count());
+    // The first interval was changed because the old buffer was collected.
+    // The second interval was changed because a new buffer was allocated.
+    CHECK_EQ(2, stats_update.updates_written());
+    CHECK_LT(entries_size, stats_update.entries_size());
+    CHECK_EQ(2, stats_update.entries_count());
+    CHECK_EQ(8, stats_update.first_interval_index());
+  }
+
+  v8::HeapProfiler::StopHeapObjectsTracking();
+}
+
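For orientation, here is a minimal sketch of how an embedder might consume these
incremental heap stats outside the test harness. It is not part of the patch; it
assumes the OutputStream and HeapStatsUpdate declarations shipped in this drop's
include/v8-profiler.h (TestStatsStream above is a test-only implementation of
the same interface).

  #include <cstdio>
  #include "v8-profiler.h"

  class StatsLogger : public v8::OutputStream {
   public:
    virtual void EndOfStream() {}
    // Snapshot serialization path; unused when streaming stats updates.
    virtual WriteResult WriteAsciiChunk(char* data, int size) {
      return kContinue;
    }
    // One HeapStatsUpdate per time interval whose count/size changed.
    virtual WriteResult WriteHeapStatsUpdate(v8::HeapStatsUpdate* data,
                                             int count) {
      for (int i = 0; i < count; ++i) {
        std::printf("interval %u: %u objects, %u bytes\n",
                    data[i].index, data[i].count, data[i].size);
      }
      return kContinue;
    }
  };

  // Hypothetical usage:
  //   v8::HeapProfiler::StartHeapObjectsTracking();
  //   ... run script ...
  //   StatsLogger logger;
  //   v8::HeapProfiler::PushHeapObjectsStats(&logger);  // streams deltas
  //   v8::HeapProfiler::StopHeapObjectsTracking();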
 
 static void CheckChildrenIds(const v8::HeapSnapshot* snapshot,
                              const v8::HeapGraphNode* node,
@@ -735,6 +913,42 @@ TEST(HeapSnapshotGetNodeById) {
 }
 
 
+TEST(HeapSnapshotGetSnapshotObjectId) {
+  v8::HandleScope scope;
+  LocalContext env;
+  CompileRun("globalObject = {};\n");
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("get_snapshot_object_id"));
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  const v8::HeapGraphNode* global_object =
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "globalObject");
+  CHECK(global_object);
+
+  v8::Local<v8::Value> globalObjectHandle =
+      env->Global()->Get(v8::String::New("globalObject"));
+  CHECK(!globalObjectHandle.IsEmpty());
+  CHECK(globalObjectHandle->IsObject());
+
+  v8::SnapshotObjectId id =
+      v8::HeapProfiler::GetSnapshotObjectId(globalObjectHandle);
+  CHECK_NE(static_cast<int>(v8::HeapProfiler::kUnknownObjectId),
+           id);
+  CHECK_EQ(static_cast<int>(id), global_object->GetId());
+}
+
+
+TEST(HeapSnapshotUnknownSnapshotObjectId) {
+  v8::HandleScope scope;
+  LocalContext env;
+  CompileRun("globalObject = {};\n");
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("unknown_object_id"));
+  const v8::HeapGraphNode* node =
+      snapshot->GetNodeById(v8::HeapProfiler::kUnknownObjectId);
+  CHECK_EQ(NULL, node);
+}
+
+
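Taken together, these two tests imply the intended round trip between the new
APIs: resolve a live handle to its SnapshotObjectId, then look the node up
again in a later snapshot. A short sketch (illustration only; `handle` is an
assumed v8::Handle<v8::Value>):

  v8::SnapshotObjectId id = v8::HeapProfiler::GetSnapshotObjectId(handle);
  if (id != v8::HeapProfiler::kUnknownObjectId) {
    const v8::HeapSnapshot* later =
        v8::HeapProfiler::TakeSnapshot(v8_str("later"));
    // NULL if the object died before this snapshot was taken.
    const v8::HeapGraphNode* node = later->GetNodeById(id);
  }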
 namespace {
 
 class TestActivityControl : public v8::ActivityControl {
@@ -973,9 +1187,8 @@ TEST(HeapSnapshotImplicitReferences) {
       v8::HeapProfiler::TakeSnapshot(v8_str("implicit_refs"));
 
   const v8::HeapGraphNode* global_object = GetGlobalObject(snapshot);
-  // Use kShortcut type to skip intermediate JSGlobalPropertyCell
   const v8::HeapGraphNode* obj0 = GetProperty(
-      global_object, v8::HeapGraphEdge::kShortcut, "root_object");
+      global_object, v8::HeapGraphEdge::kProperty, "root_object");
   CHECK(obj0);
   CHECK_EQ(v8::HeapGraphNode::kObject, obj0->GetType());
   const v8::HeapGraphNode* obj1 = GetProperty(
@@ -1148,7 +1361,7 @@ TEST(GetHeapValue) {
       env->Global()->GetPrototype().As<v8::Object>();
   CHECK(js_global == global->GetHeapValue());
   const v8::HeapGraphNode* obj = GetProperty(
-      global, v8::HeapGraphEdge::kShortcut, "a");
+      global, v8::HeapGraphEdge::kProperty, "a");
   CHECK(obj->GetHeapValue()->IsObject());
   v8::Local<v8::Object> js_obj = js_global->Get(v8_str("a")).As<v8::Object>();
   CHECK(js_obj == obj->GetHeapValue());
@@ -1177,7 +1390,7 @@ TEST(GetHeapValueForDeletedObject) {
       v8::HeapProfiler::TakeSnapshot(v8_str("snapshot"));
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   const v8::HeapGraphNode* obj = GetProperty(
-      global, v8::HeapGraphEdge::kShortcut, "a");
+      global, v8::HeapGraphEdge::kProperty, "a");
   const v8::HeapGraphNode* prop = GetProperty(
       obj, v8::HeapGraphEdge::kProperty, "p");
   {
@@ -1264,7 +1477,7 @@ TEST(FastCaseGetter) {
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   CHECK_NE(NULL, global);
   const v8::HeapGraphNode* obj1 =
-      GetProperty(global, v8::HeapGraphEdge::kShortcut, "obj1");
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "obj1");
   CHECK_NE(NULL, obj1);
   const v8::HeapGraphNode* getterFunction =
       GetProperty(obj1, v8::HeapGraphEdge::kProperty, "get-propWithGetter");
@@ -1346,7 +1559,7 @@ TEST(SfiAndJsFunctionWeakRefs) {
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   CHECK_NE(NULL, global);
   const v8::HeapGraphNode* fun =
-      GetProperty(global, v8::HeapGraphEdge::kShortcut, "fun");
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "fun");
   CHECK(HasWeakEdge(fun));
   const v8::HeapGraphNode* shared =
       GetProperty(fun, v8::HeapGraphEdge::kInternal, "shared");
@@ -1354,6 +1567,30 @@ TEST(SfiAndJsFunctionWeakRefs) {
 }
 
 
+TEST(NoDebugObjectInSnapshot) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  v8::internal::Isolate::Current()->debug()->Load();
+  CompileRun("foo = {};");
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("snapshot"));
+  const v8::HeapGraphNode* root = snapshot->GetRoot();
+  int globals_count = 0;
+  for (int i = 0; i < root->GetChildrenCount(); ++i) {
+    const v8::HeapGraphEdge* edge = root->GetChild(i);
+    if (edge->GetType() == v8::HeapGraphEdge::kShortcut) {
+      ++globals_count;
+      const v8::HeapGraphNode* global = edge->GetToNode();
+      const v8::HeapGraphNode* foo =
+          GetProperty(global, v8::HeapGraphEdge::kProperty, "foo");
+      CHECK_NE(NULL, foo);
+    }
+  }
+  CHECK_EQ(1, globals_count);
+}
+
+
 TEST(PersistentHandleCount) {
   v8::HandleScope scope;
   LocalContext env;
@@ -1386,3 +1623,44 @@ TEST(PersistentHandleCount) {
   p_BBB.Dispose();
   CHECK_EQ(global_handle_count, v8::HeapProfiler::GetPersistentHandleCount());
 }
+
+
+TEST(AllStrongGcRootsHaveNames) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  CompileRun("foo = {};");
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("snapshot"));
+  const v8::HeapGraphNode* gc_roots = GetNode(
+      snapshot->GetRoot(), v8::HeapGraphNode::kObject, "(GC roots)");
+  CHECK_NE(NULL, gc_roots);
+  const v8::HeapGraphNode* strong_roots = GetNode(
+      gc_roots, v8::HeapGraphNode::kObject, "(Strong roots)");
+  CHECK_NE(NULL, strong_roots);
+  for (int i = 0; i < strong_roots->GetChildrenCount(); ++i) {
+    const v8::HeapGraphEdge* edge = strong_roots->GetChild(i);
+    CHECK_EQ(v8::HeapGraphEdge::kInternal, edge->GetType());
+    v8::String::AsciiValue name(edge->GetName());
+    CHECK(isalpha(**name));
+  }
+}
+
+
+TEST(NoRefsToNonEssentialEntries) {
+  v8::HandleScope scope;
+  LocalContext env;
+  CompileRun("global_object = {};\n");
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("snapshot"));
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  const v8::HeapGraphNode* global_object =
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "global_object");
+  CHECK_NE(NULL, global_object);
+  const v8::HeapGraphNode* properties =
+      GetProperty(global_object, v8::HeapGraphEdge::kInternal, "properties");
+  CHECK_EQ(NULL, properties);
+  const v8::HeapGraphNode* elements =
+      GetProperty(global_object, v8::HeapGraphEdge::kInternal, "elements");
+  CHECK_EQ(NULL, elements);
+}
index 2904e66..d4a40bf 100644 (file)
@@ -9,7 +9,6 @@
 #include "macro-assembler.h"
 #include "global-handles.h"
 #include "cctest.h"
-#include "snapshot.h"
 
 using namespace v8::internal;
 
@@ -1215,7 +1214,9 @@ TEST(TestSizeOfObjects) {
   // The heap size should go back to initial size after a full GC, even
   // though sweeping didn't finish yet.
   HEAP->CollectAllGarbage(Heap::kNoGCFlags);
-  CHECK(!HEAP->old_pointer_space()->IsSweepingComplete());
+
+  // Normally sweeping would not be complete here, but there is no guarantee.
+
   CHECK_EQ(initial_size, static_cast<int>(HEAP->SizeOfObjects()));
 
   // Advancing the sweeper step-wise should not change the heap size.
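The step-wise advance this comment refers to falls outside the hunk; presumably
it loops roughly as follows (a sketch of the assumed test continuation, with
PagedSpace::AdvanceSweeper as found in this tree):

  while (!HEAP->old_pointer_space()->IsSweepingComplete()) {
    HEAP->old_pointer_space()->AdvanceSweeper(KB);
    CHECK_EQ(initial_size, static_cast<int>(HEAP->SizeOfObjects()));
  }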
@@ -1276,6 +1277,13 @@ TEST(GrowAndShrinkNewSpace) {
   InitializeVM();
   NewSpace* new_space = HEAP->new_space();
 
+  if (HEAP->ReservedSemiSpaceSize() == HEAP->InitialSemiSpaceSize()) {
+    // The max size cannot exceed the reserved size, since semispaces must
+    // always lie within the reserved space.  We can't test new space growing
+    // and shrinking if the reserved size equals the minimum (initial) size.
+    return;
+  }
+
   // Explicitly growing should double the space capacity.
   intptr_t old_capacity, new_capacity;
   old_capacity = new_space->Capacity();
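The checks that follow in the unmodified part of the test presumably continue
along these lines, which is why the early return above is needed: Grow() cannot
double the capacity once the semispaces already occupy the whole reservation.

  new_space->Grow();
  new_capacity = new_space->Capacity();
  CHECK(2 * old_capacity == new_capacity);  // Assumed test continuation.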
@@ -1316,6 +1324,14 @@ TEST(GrowAndShrinkNewSpace) {
 
 TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
   InitializeVM();
+
+  if (HEAP->ReservedSemiSpaceSize() == HEAP->InitialSemiSpaceSize()) {
+    // The max size cannot exceed the reserved size, since semispaces must
+    // always lie within the reserved space.  We can't test new space growing
+    // and shrinking if the reserved size equals the minimum (initial) size.
+    return;
+  }
+
   v8::HandleScope scope;
   NewSpace* new_space = HEAP->new_space();
   intptr_t old_capacity, new_capacity;
@@ -1344,14 +1360,13 @@ static int NumberOfGlobalObjects() {
 // optimized code.
 TEST(LeakGlobalContextViaMap) {
   i::FLAG_allow_natives_syntax = true;
-  bool snapshot_enabled = i::Snapshot::IsEnabled();
   v8::HandleScope outer_scope;
   v8::Persistent<v8::Context> ctx1 = v8::Context::New();
   v8::Persistent<v8::Context> ctx2 = v8::Context::New();
   ctx1->Enter();
 
   HEAP->CollectAllAvailableGarbage();
-  CHECK_EQ((snapshot_enabled ? 6 : 4), NumberOfGlobalObjects());
+  CHECK_EQ(4, NumberOfGlobalObjects());
 
   {
     v8::HandleScope inner_scope;
@@ -1371,7 +1386,7 @@ TEST(LeakGlobalContextViaMap) {
     ctx1.Dispose();
   }
   HEAP->CollectAllAvailableGarbage();
-  CHECK_EQ((snapshot_enabled ? 3 : 2), NumberOfGlobalObjects());
+  CHECK_EQ(2, NumberOfGlobalObjects());
   ctx2.Dispose();
   HEAP->CollectAllAvailableGarbage();
   CHECK_EQ(0, NumberOfGlobalObjects());
@@ -1382,14 +1397,13 @@ TEST(LeakGlobalContextViaMap) {
 // optimized code.
 TEST(LeakGlobalContextViaFunction) {
   i::FLAG_allow_natives_syntax = true;
-  bool snapshot_enabled = i::Snapshot::IsEnabled();
   v8::HandleScope outer_scope;
   v8::Persistent<v8::Context> ctx1 = v8::Context::New();
   v8::Persistent<v8::Context> ctx2 = v8::Context::New();
   ctx1->Enter();
 
   HEAP->CollectAllAvailableGarbage();
-  CHECK_EQ((snapshot_enabled ? 6 : 4), NumberOfGlobalObjects());
+  CHECK_EQ(4, NumberOfGlobalObjects());
 
   {
     v8::HandleScope inner_scope;
@@ -1409,7 +1423,7 @@ TEST(LeakGlobalContextViaFunction) {
     ctx1.Dispose();
   }
   HEAP->CollectAllAvailableGarbage();
-  CHECK_EQ((snapshot_enabled ? 3 : 2), NumberOfGlobalObjects());
+  CHECK_EQ(2, NumberOfGlobalObjects());
   ctx2.Dispose();
   HEAP->CollectAllAvailableGarbage();
   CHECK_EQ(0, NumberOfGlobalObjects());
@@ -1418,14 +1432,13 @@ TEST(LeakGlobalContextViaFunction) {
 
 TEST(LeakGlobalContextViaMapKeyed) {
   i::FLAG_allow_natives_syntax = true;
-  bool snapshot_enabled = i::Snapshot::IsEnabled();
   v8::HandleScope outer_scope;
   v8::Persistent<v8::Context> ctx1 = v8::Context::New();
   v8::Persistent<v8::Context> ctx2 = v8::Context::New();
   ctx1->Enter();
 
   HEAP->CollectAllAvailableGarbage();
-  CHECK_EQ((snapshot_enabled ? 6 : 4), NumberOfGlobalObjects());
+  CHECK_EQ(4, NumberOfGlobalObjects());
 
   {
     v8::HandleScope inner_scope;
@@ -1445,7 +1458,7 @@ TEST(LeakGlobalContextViaMapKeyed) {
     ctx1.Dispose();
   }
   HEAP->CollectAllAvailableGarbage();
-  CHECK_EQ((snapshot_enabled ? 3 : 2), NumberOfGlobalObjects());
+  CHECK_EQ(2, NumberOfGlobalObjects());
   ctx2.Dispose();
   HEAP->CollectAllAvailableGarbage();
   CHECK_EQ(0, NumberOfGlobalObjects());
@@ -1454,14 +1467,13 @@ TEST(LeakGlobalContextViaMapKeyed) {
 
 TEST(LeakGlobalContextViaMapProto) {
   i::FLAG_allow_natives_syntax = true;
-  bool snapshot_enabled = i::Snapshot::IsEnabled();
   v8::HandleScope outer_scope;
   v8::Persistent<v8::Context> ctx1 = v8::Context::New();
   v8::Persistent<v8::Context> ctx2 = v8::Context::New();
   ctx1->Enter();
 
   HEAP->CollectAllAvailableGarbage();
-  CHECK_EQ((snapshot_enabled ? 6 : 4), NumberOfGlobalObjects());
+  CHECK_EQ(4, NumberOfGlobalObjects());
 
   {
     v8::HandleScope inner_scope;
@@ -1485,7 +1497,7 @@ TEST(LeakGlobalContextViaMapProto) {
     ctx1.Dispose();
   }
   HEAP->CollectAllAvailableGarbage();
-  CHECK_EQ((snapshot_enabled ? 3 : 2), NumberOfGlobalObjects());
+  CHECK_EQ(2, NumberOfGlobalObjects());
   ctx2.Dispose();
   HEAP->CollectAllAvailableGarbage();
   CHECK_EQ(0, NumberOfGlobalObjects());
@@ -1639,6 +1651,15 @@ TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
   while (!marking->IsStopped() && !marking->IsComplete()) {
     marking->Step(1 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
   }
+  if (!marking->IsStopped() || marking->should_hurry()) {
+    // We don't normally finish a GC via Step(); we normally finish by
+    // setting the stack guard and then doing the final steps in the stack
+    // guard interrupt.  But here we didn't ask for that, and there is no
+    // JS code running to trigger the interrupt, so we explicitly finalize
+    // here.
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags,
+                            "Test finalizing incremental mark-sweep");
+  }
 
   CHECK_EQ(HEAP->global_ic_age(), f->shared()->ic_age());
   CHECK_EQ(0, f->shared()->opt_count());
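Schematically, incremental marking has two completion paths, and the added
block forces the second one (a summary, not literal code from this tree):

  // Normal path:  Step() ... IsComplete() -> stack guard set -> the next
  //               JS entry services the interrupt and finalizes mark-sweep.
  // Test path:    Step() ... IsComplete() -> no JS runs, no interrupt fires,
  //               so CollectAllGarbage(Heap::kNoGCFlags) finalizes directly.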
@@ -1714,3 +1735,60 @@ TEST(OptimizedAllocationAlwaysInNewSpace) {
 
   CHECK(HEAP->InNewSpace(*o));
 }
+
+
+static int CountMapTransitions(Map* map) {
+  int result = 0;
+  DescriptorArray* descs = map->instance_descriptors();
+  for (int i = 0; i < descs->number_of_descriptors(); i++) {
+    if (descs->IsTransitionOnly(i)) {
+      result++;
+    }
+  }
+  return result;
+}
+
+
+// Test that map transitions are cleared and maps are collected with
+// incremental marking as well.
+TEST(Regress1465) {
+  i::FLAG_allow_natives_syntax = true;
+  i::FLAG_trace_incremental_marking = true;
+  InitializeVM();
+  v8::HandleScope scope;
+
+  #define TRANSITION_COUNT 256
+  for (int i = 0; i < TRANSITION_COUNT; i++) {
+    EmbeddedVector<char, 64> buffer;
+    OS::SNPrintF(buffer, "var o = new Object; o.prop%d = %d;", i, i);
+    CompileRun(buffer.start());
+  }
+  CompileRun("var root = new Object;");
+  Handle<JSObject> root =
+      v8::Utils::OpenHandle(
+          *v8::Handle<v8::Object>::Cast(
+              v8::Context::GetCurrent()->Global()->Get(v8_str("root"))));
+
+  // Count number of live transitions before marking.
+  int transitions_before = CountMapTransitions(root->map());
+  CompileRun("%DebugPrint(root);");
+  CHECK_EQ(TRANSITION_COUNT, transitions_before);
+
+  // Go through all incremental marking steps in one swoop.
+  IncrementalMarking* marking = HEAP->incremental_marking();
+  CHECK(marking->IsStopped());
+  marking->Start();
+  CHECK(marking->IsMarking());
+  while (!marking->IsComplete()) {
+    marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+  }
+  CHECK(marking->IsComplete());
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+  CHECK(marking->IsStopped());
+
+  // Count number of live transitions after marking.  Note that one transition
+  // is left, because 'o' still holds an instance of one transition target.
+  int transitions_after = CountMapTransitions(root->map());
+  CompileRun("%DebugPrint(root);");
+  CHECK_EQ(1, transitions_after);
+}
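For readers new to hidden classes: each property added to an object records a
transition edge from its map to a successor map, so the loop above leaves
TRANSITION_COUNT such edges hanging off the base Object map until GC clears
them. Schematically:

  // Map(Object) --"prop0"--> Map(Object, prop0)
  // Map(Object) --"prop1"--> Map(Object, prop1)
  // ...
  // Only the edge still anchored by a live instance survives the GC,
  // hence transitions_after == 1.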
index 7520b05..4c78f02 100644 (file)
@@ -130,6 +130,18 @@ TEST(RemoveLast) {
 }
 
 
+TEST(Allocate) {
+  List<int> list(4);
+  list.Add(1);
+  CHECK_EQ(1, list.length());
+  list.Allocate(100);
+  CHECK_EQ(100, list.length());
+  CHECK_LE(100, list.capacity());
+  list[99] = 123;
+  CHECK_EQ(123, list[99]);
+}
+
+
 TEST(Clear) {
   List<int> list(4);
   CHECK_EQ(0, list.length());
index 83a576d..700f322 100644 (file)
@@ -540,7 +540,7 @@ TEST(BootUpMemoryUse) {
       }
     } else {
       if (v8::internal::Snapshot::IsEnabled()) {
-        CHECK_LE(booted_memory - initial_memory, 2600 * 1024);  // 2484.
+        CHECK_LE(booted_memory - initial_memory, 2800 * 1024);  // 2484.
       } else {
         CHECK_LE(booted_memory - initial_memory, 2950 * 1024);  // 2844
       }
index 54898a0..e89e6cd 100644 (file)
@@ -1590,7 +1590,7 @@ TEST(CharClassDifference) {
   ZoneScope zone_scope(Isolate::Current(), DELETE_ON_EXIT);
   ZoneList<CharacterRange>* base = new ZoneList<CharacterRange>(1);
   base->Add(CharacterRange::Everything());
-  Vector<const uc16> overlay = CharacterRange::GetWordBounds();
+  Vector<const int> overlay = CharacterRange::GetWordBounds();
   ZoneList<CharacterRange>* included = NULL;
   ZoneList<CharacterRange>* excluded = NULL;
   CharacterRange::Split(base, overlay, &included, &excluded);
@@ -1599,7 +1599,7 @@ TEST(CharClassDifference) {
     if (in_base) {
       bool in_overlay = false;
       for (int j = 0; !in_overlay && j < overlay.length(); j += 2) {
-        if (overlay[j] <= i && i <= overlay[j+1])
+        if (overlay[j] <= i && i < overlay[j+1])
           in_overlay = true;
       }
       CHECK_EQ(in_overlay, InClass(i, included));
@@ -1672,16 +1672,6 @@ TEST(CanonicalizeCharacterSets) {
   ASSERT_EQ(30, list->at(0).to());
 }
 
-// Checks whether a character is in the set represented by a list of ranges.
-static bool CharacterInSet(ZoneList<CharacterRange>* set, uc16 value) {
-  for (int i = 0; i < set->length(); i++) {
-    CharacterRange range = set->at(i);
-    if (range.from() <= value && value <= range.to()) {
-      return true;
-    }
-  }
-  return false;
-}
 
 TEST(CharacterRangeMerge) {
   v8::internal::V8::Initialize(NULL);
@@ -1768,67 +1758,6 @@ TEST(CharacterRangeMerge) {
   ZoneList<CharacterRange> first_only(4);
   ZoneList<CharacterRange> second_only(4);
   ZoneList<CharacterRange> both(4);
-
-  // Merge one direction.
-  CharacterRange::Merge(&l1, &l2, &first_only, &second_only, &both);
-
-  CHECK(CharacterRange::IsCanonical(&first_only));
-  CHECK(CharacterRange::IsCanonical(&second_only));
-  CHECK(CharacterRange::IsCanonical(&both));
-
-  for (uc16 i = 0; i < offset; i++) {
-    bool in_first = CharacterInSet(&l1, i);
-    bool in_second = CharacterInSet(&l2, i);
-    CHECK((in_first && !in_second) == CharacterInSet(&first_only, i));
-    CHECK((!in_first && in_second) == CharacterInSet(&second_only, i));
-    CHECK((in_first && in_second) == CharacterInSet(&both, i));
-  }
-
-  first_only.Clear();
-  second_only.Clear();
-  both.Clear();
-
-  // Merge other direction.
-  CharacterRange::Merge(&l2, &l1, &second_only, &first_only, &both);
-
-  CHECK(CharacterRange::IsCanonical(&first_only));
-  CHECK(CharacterRange::IsCanonical(&second_only));
-  CHECK(CharacterRange::IsCanonical(&both));
-
-  for (uc16 i = 0; i < offset; i++) {
-    bool in_first = CharacterInSet(&l1, i);
-    bool in_second = CharacterInSet(&l2, i);
-    CHECK((in_first && !in_second) == CharacterInSet(&first_only, i));
-    CHECK((!in_first && in_second) == CharacterInSet(&second_only, i));
-    CHECK((in_first && in_second) == CharacterInSet(&both, i));
-  }
-
-  first_only.Clear();
-  second_only.Clear();
-  both.Clear();
-
-  // Merge but don't record all combinations.
-  CharacterRange::Merge(&l1, &l2, NULL, NULL, &both);
-
-  CHECK(CharacterRange::IsCanonical(&both));
-
-  for (uc16 i = 0; i < offset; i++) {
-    bool in_first = CharacterInSet(&l1, i);
-    bool in_second = CharacterInSet(&l2, i);
-    CHECK((in_first && in_second) == CharacterInSet(&both, i));
-  }
-
-  // Merge into same set.
-  ZoneList<CharacterRange> all(4);
-  CharacterRange::Merge(&l1, &l2, &all, &all, &all);
-
-  CHECK(CharacterRange::IsCanonical(&all));
-
-  for (uc16 i = 0; i < offset; i++) {
-    bool in_first = CharacterInSet(&l1, i);
-    bool in_second = CharacterInSet(&l2, i);
-    CHECK((in_first || in_second) == CharacterInSet(&all, i));
-  }
 }
 
 
index e2a179f..d86886f 100644 (file)
@@ -620,3 +620,55 @@ TEST(AsciiArrayJoin) {
   CHECK(result.IsEmpty());
   CHECK(context->HasOutOfMemoryException());
 }
+
+
+static void CheckException(const char* source) {
+  // An empty handle is returned upon exception.
+  CHECK(CompileRun(source).IsEmpty());
+}
+
+
+TEST(RobustSubStringStub) {
+  // This tests whether the SubStringStub can handle unsafe arguments.
+  // If not recognized, those unsafe arguments lead to out-of-bounds reads.
+  FLAG_allow_natives_syntax = true;
+  InitializeVM();
+  HandleScope scope;
+  v8::Local<v8::Value> result;
+  Handle<String> string;
+  CompileRun("var short = 'abcdef';");
+
+  // Invalid indices.
+  CheckException("%_SubString(short,     0,    10000);");
+  CheckException("%_SubString(short, -1234,        5);");
+  CheckException("%_SubString(short,     5,        2);");
+  // Special HeapNumbers.
+  CheckException("%_SubString(short,     1, Infinity);");
+  CheckException("%_SubString(short,   NaN,        5);");
+  // String arguments.
+  CheckException("%_SubString(short,    '2',     '5');");
+  // Ordinary HeapNumbers can be handled (in runtime).
+  result = CompileRun("%_SubString(short, Math.sqrt(4), 5.1);");
+  string = v8::Utils::OpenHandle(v8::String::Cast(*result));
+  CHECK_EQ("cde", *(string->ToCString()));
+
+  CompileRun("var long = 'abcdefghijklmnopqrstuvwxyz';");
+  // Invalid indices.
+  CheckException("%_SubString(long,     0,    10000);");
+  CheckException("%_SubString(long, -1234,       17);");
+  CheckException("%_SubString(long,    17,        2);");
+  // Special HeapNumbers.
+  CheckException("%_SubString(long,     1, Infinity);");
+  CheckException("%_SubString(long,   NaN,       17);");
+  // String arguments.
+  CheckException("%_SubString(long,    '2',    '17');");
+  // Ordinary HeapNumbers within bounds can be handled (in runtime).
+  result = CompileRun("%_SubString(long, Math.sqrt(4), 17.1);");
+  string = v8::Utils::OpenHandle(v8::String::Cast(*result));
+  CHECK_EQ("cdefghijklmnopq", *(string->ToCString()));
+
+  // Test that out-of-bounds substring of a slice fails when the indices
+  // would have been valid for the underlying string.
+  CompileRun("var slice = long.slice(1, 15);");
+  CheckException("%_SubString(slice, 0, 17);");
+}
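Note that only the raw intrinsic needs these guards; through the public API the
JS wrappers (string.js) clamp and coerce the indices before %_SubString runs,
so the same arguments are harmless. For instance (plain ECMAScript behavior):

  // "abcdef".substring(0, 10000)  -> "abcdef"   (end clamped to length)
  // "abcdef".substring(-1234, 5)  -> "abcde"    (start clamped to 0)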
index 56d5936..7bba7b6 100644 (file)
@@ -48,11 +48,11 @@ static Handle<JSWeakMap> AllocateJSWeakMap() {
 
 static void PutIntoWeakMap(Handle<JSWeakMap> weakmap,
                            Handle<JSObject> key,
-                           int value) {
+                           Handle<Object> value) {
   Handle<ObjectHashTable> table = PutIntoObjectHashTable(
       Handle<ObjectHashTable>(ObjectHashTable::cast(weakmap->table())),
       Handle<JSObject>(JSObject::cast(*key)),
-      Handle<Smi>(Smi::FromInt(value)));
+      value);
   weakmap->set_table(*table);
 }
 
@@ -65,6 +65,7 @@ static void WeakPointerCallback(v8::Persistent<v8::Value> handle, void* id) {
 
 
 TEST(Weakness) {
+  FLAG_incremental_marking = false;
   LocalContext context;
   v8::HandleScope scope;
   Handle<JSWeakMap> weakmap = AllocateJSWeakMap();
@@ -83,7 +84,9 @@ TEST(Weakness) {
   // Put entry into weak map.
   {
     v8::HandleScope scope;
-    PutIntoWeakMap(weakmap, Handle<JSObject>(JSObject::cast(*key)), 23);
+    PutIntoWeakMap(weakmap,
+                   Handle<JSObject>(JSObject::cast(*key)),
+                   Handle<Smi>(Smi::FromInt(23)));
   }
   CHECK_EQ(1, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
 
@@ -133,7 +136,7 @@ TEST(Shrinking) {
     Handle<Map> map = FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
     for (int i = 0; i < 32; i++) {
       Handle<JSObject> object = FACTORY->NewJSObjectFromMap(map);
-      PutIntoWeakMap(weakmap, object, i);
+      PutIntoWeakMap(weakmap, object, Handle<Smi>(Smi::FromInt(i)));
     }
   }
 
@@ -152,3 +155,72 @@ TEST(Shrinking) {
   // Check shrunk capacity.
   CHECK_EQ(32, ObjectHashTable::cast(weakmap->table())->Capacity());
 }
+
+
+// Test that weak map values on an evacuation candidate which are not reachable
+// by other paths are correctly recorded in the slots buffer.
+TEST(Regress2060a) {
+  FLAG_always_compact = true;
+  LocalContext context;
+  v8::HandleScope scope;
+  Handle<JSFunction> function =
+      FACTORY->NewFunction(FACTORY->function_symbol(), FACTORY->null_value());
+  Handle<JSObject> key = FACTORY->NewJSObject(function);
+  Handle<JSWeakMap> weakmap = AllocateJSWeakMap();
+
+  // Start a second old-space page so values land on an evacuation candidate.
+  Page* first_page = HEAP->old_pointer_space()->anchor()->next_page();
+  FACTORY->NewFixedArray(900 * KB / kPointerSize, TENURED);
+
+  // Fill up weak map with values on an evacuation candidate.
+  {
+    v8::HandleScope scope;
+    for (int i = 0; i < 32; i++) {
+      Handle<JSObject> object = FACTORY->NewJSObject(function, TENURED);
+      CHECK(!HEAP->InNewSpace(object->address()));
+      CHECK(!first_page->Contains(object->address()));
+      PutIntoWeakMap(weakmap, key, object);
+    }
+  }
+
+  // Force compacting garbage collection.
+  CHECK(FLAG_always_compact);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+}
+
+
+// Test that weak map keys on an evacuation candidate which are reachable by
+// other strong paths are correctly recorded in the slots buffer.
+TEST(Regress2060b) {
+  FLAG_always_compact = true;
+#ifdef DEBUG
+  FLAG_verify_heap = true;
+#endif
+  LocalContext context;
+  v8::HandleScope scope;
+  Handle<JSFunction> function =
+      FACTORY->NewFunction(FACTORY->function_symbol(), FACTORY->null_value());
+
+  // Start second old-space page so that keys land on evacuation candidate.
+  Page* first_page = HEAP->old_pointer_space()->anchor()->next_page();
+  FACTORY->NewFixedArray(900 * KB / kPointerSize, TENURED);
+
+  // Fill up weak map with keys on an evacuation candidate.
+  Handle<JSObject> keys[32];
+  for (int i = 0; i < 32; i++) {
+    keys[i] = FACTORY->NewJSObject(function, TENURED);
+    CHECK(!HEAP->InNewSpace(keys[i]->address()));
+    CHECK(!first_page->Contains(keys[i]->address()));
+  }
+  Handle<JSWeakMap> weakmap = AllocateJSWeakMap();
+  for (int i = 0; i < 32; i++) {
+    PutIntoWeakMap(weakmap, keys[i], Handle<Smi>(Smi::FromInt(i)));
+  }
+
+  // Force compacting garbage collection. The subsequent collections are used
+  // to verify that key references were actually updated.
+  CHECK(FLAG_always_compact);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+}
index b2eabc4..f1387e8 100644 (file)
@@ -53,6 +53,8 @@ class CcTestCase(test.TestCase):
       serialization_file = join('obj', 'test', self.mode, 'serdes')
     else:
       serialization_file = join('obj', 'serdes')
+      if not exists(join(self.context.buildspace, 'obj')):
+        os.makedirs(join(self.context.buildspace, 'obj'))
     serialization_file += '_' + self.GetName()
     serialization_file = join(self.context.buildspace, serialization_file)
     serialization_file += ''.join(self.variant_flags).replace('-', '_')
diff --git a/src/3rdparty/v8/test/mjsunit/array-bounds-check-removal.js b/src/3rdparty/v8/test/mjsunit/array-bounds-check-removal.js
new file mode 100644 (file)
index 0000000..81064aa
--- /dev/null
@@ -0,0 +1,145 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+var a = new Int32Array(1024);
+
+function test_base(base,cond) {
+  a[base + 1] = 1;
+  a[base + 4] = 2;
+  a[base + 3] = 3;
+  a[base + 2] = 4;
+  a[base + 4] = base + 4;
+  if (cond) {
+    a[base + 1] = 1;
+    a[base + 2] = 2;
+    a[base + 2] = 3;
+    a[base + 2] = 4;
+    a[base + 4] = base + 4;
+  } else {
+    a[base + 6] = 1;
+    a[base + 4] = 2;
+    a[base + 3] = 3;
+    a[base + 2] = 4;
+    a[base + 4] = base - 4;
+  }
+}
+
+function check_test_base(base,cond) {
+  if (cond) {
+    assertEquals(1, a[base + 1]);
+    assertEquals(4, a[base + 2]);
+    assertEquals(base + 4, a[base + 4]);
+  } else {
+    assertEquals(1, a[base + 6]);
+    assertEquals(3, a[base + 3]);
+    assertEquals(4, a[base + 2]);
+    assertEquals(base - 4, a[base + 4]);
+  }
+}
+
+
+function test_minus(base,cond) {
+  a[base - 1] = 1;
+  a[base - 2] = 2;
+  a[base + 4] = 3;
+  a[base] = 4;
+  a[base + 4] = base + 4;
+  if (cond) {
+    a[base - 4] = 1;
+    a[base + 5] = 2;
+    a[base + 3] = 3;
+    a[base + 2] = 4;
+    a[base + 4] = base + 4;
+  } else {
+    a[base + 6] = 1;
+    a[base + 4] = 2;
+    a[base + 3] = 3;
+    a[base + 2] = 4;
+    a[base + 4] = base - 4;
+  }
+}
+
+function check_test_minus(base,cond) {
+  if (cond) {
+    assertEquals(2, a[base + 5]);
+    assertEquals(3, a[base + 3]);
+    assertEquals(4, a[base + 2]);
+    assertEquals(base + 4, a[base + 4]);
+  } else {
+    assertEquals(1, a[base + 6]);
+    assertEquals(3, a[base + 3]);
+    assertEquals(4, a[base + 2]);
+    assertEquals(base - 4, a[base + 4]);
+  }
+}
+
+test_base(1,true);
+test_base(2,true);
+test_base(1,false);
+test_base(2,false);
+%OptimizeFunctionOnNextCall(test_base);
+test_base(3,true);
+check_test_base(3,true);
+test_base(3,false);
+check_test_base(3,false);
+
+test_minus(5,true);
+test_minus(6,true);
+%OptimizeFunctionOnNextCall(test_minus);
+test_minus(7,true);
+check_test_minus(7,true);
+test_minus(7,false);
+check_test_minus(7,false);
+
+// Optimization status:
+// YES: 1
+// NO: 2
+// ALWAYS: 3
+// NEVER: 4
+
+if (false) {
+test_base(5,true);
+test_base(6,true);
+test_base(5,false);
+test_base(6,false);
+%OptimizeFunctionOnNextCall(test_base);
+test_base(-2,true);
+assertTrue(%GetOptimizationStatus(test_base) != 1);
+
+test_base(5,true);
+test_base(6,true);
+test_base(5,false);
+test_base(6,false);
+%OptimizeFunctionOnNextCall(test_base);
+test_base(2048,true);
+assertTrue(%GetOptimizationStatus(test_base) != 1);
+}
+
+gc();
+
index a0fad7c..8e0ff87 100644 (file)
@@ -25,6 +25,9 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// On MacOS, this test needs a stack size of at least 538 kBytes.
+// Flags: --stack-size=600
+
 // Test that we can make large object literals that work.
 // Also test that we can attempt to make even larger object literals without
 // crashing.
index d6d9f1b..0b202f7 100644 (file)
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --inline-construct --nolimit-inlining
+// Flags: --allow-natives-syntax --inline-construct --max-inlined-source-size=999999 --max-inlined-nodes=999999 --max-inlined-nodes-cumulative=999999
 
 // Test that huge constructors (more than 256 this assignments) are
 // handled correctly.
index b6adf7f..f8a2476 100644 (file)
@@ -113,3 +113,70 @@ F4(1);
   %OptimizeFunctionOnNextCall(test_adaptation);
   test_adaptation();
 })();
+
+// Test arguments access from the inlined function.
+function uninlinable(v) {
+  assertEquals(0, v);
+  try { } catch (e) { }
+  return 0;
+}
+
+function toarr_inner() {
+  var a = arguments;
+  var marker = a[0];
+  uninlinable(uninlinable(0, 0), marker.x);
+
+  var r = new Array();
+  for (var i = a.length - 1; i >= 1; i--) {
+    r.push(a[i]);
+  }
+
+  return r;
+}
+
+function toarr1(marker, a, b, c) {
+  return toarr_inner(marker, a / 2, b / 2, c / 2);
+}
+
+function toarr2(marker, a, b, c) {
+  var x = 0;
+  return uninlinable(uninlinable(0, 0),
+                     x = toarr_inner(marker, a / 2, b / 2, c / 2)), x;
+}
+
+function test_toarr(toarr) {
+  var marker = { x: 0 };
+  assertArrayEquals([3, 2, 1], toarr(marker, 2, 4, 6));
+  assertArrayEquals([3, 2, 1], toarr(marker, 2, 4, 6));
+  %OptimizeFunctionOnNextCall(toarr);
+  assertArrayEquals([3, 2, 1], toarr(marker, 2, 4, 6));
+  delete marker.x;
+  assertArrayEquals([3, 2, 1], toarr(marker, 2, 4, 6));
+}
+
+test_toarr(toarr1);
+test_toarr(toarr2);
+
+// Test that arguments access from inlined function uses correct values.
+(function () {
+  function inner(x, y) {
+    "use strict";
+    x = 10;
+    y = 20;
+    for (var i = 0; i < 1; i++) {
+      for (var j = 1; j <= arguments.length; j++) {
+        return arguments[arguments.length - j];
+      }
+    }
+  }
+
+  function outer(x, y) {
+    return inner(x, y);
+  }
+
+  assertEquals(2, outer(1, 2));
+  assertEquals(2, outer(1, 2));
+  assertEquals(2, outer(1, 2));
+  %OptimizeFunctionOnNextCall(outer);
+  assertEquals(2, outer(1, 2));
+})();
index af9e69c..7a3f1e4 100644 (file)
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --expose-gc --inline-construct
+// Flags: --allow-natives-syntax --inline-construct
 
 // Test inlining of constructor calls.
 
@@ -68,7 +68,9 @@ function TestInAllContexts(constructor) {
   %DeoptimizeFunction(value_context);
   %DeoptimizeFunction(test_context);
   %DeoptimizeFunction(effect_context);
-  gc();  // Makes V8 forget about type information for *_context.
+  %ClearFunctionTypeFeedback(value_context);
+  %ClearFunctionTypeFeedback(test_context);
+  %ClearFunctionTypeFeedback(effect_context);
 }
 
 
index e910bb3..8607cd9 100644 (file)
@@ -36,38 +36,38 @@ assertEquals(8, eval("6;'abc';8"));
 
 // Characters just outside the ranges of hex-escapes.
 // "/" comes just before "0".
-assertEquals("x1/", "\x1/");
-assertEquals("u111/", "\u111/");
+assertThrows('"\\x1/"');
+assertThrows('"\\u111/"');
 assertEquals("\\x1/", RegExp("\\x1/").source);
 assertEquals("\\u111/", RegExp("\\u111/").source);
 
 // ":" comes just after "9".
-assertEquals("x1:", "\x1:");
-assertEquals("u111:", "\u111:");
+assertThrows('"\\x1:"');
+assertThrows('"\\u111:"');
 assertEquals("\\x1:", /\x1:/.source);
 assertEquals("\\u111:", /\u111:/.source);
 
 // "`" comes just before "a".
-assertEquals("x1`", "\x1`");
-assertEquals("u111`", "\u111`");
+assertThrows('"\\x1`"');
+assertThrows('"\\u111`"');
 assertEquals("\\x1`", /\x1`/.source);
 assertEquals("\\u111`", /\u111`/.source);
 
 // "g" comes just before "f".
-assertEquals("x1g", "\x1g");
-assertEquals("u111g", "\u111g");
+assertThrows('"\\x1g"');
+assertThrows('"\\u111g"');
 assertEquals("\\x1g", /\x1g/.source);
 assertEquals("\\u111g", /\u111g/.source);
 
 // "@" comes just before "A".
-assertEquals("x1@", "\x1@");
-assertEquals("u111@", "\u111@");
+assertThrows('"\\x1@"');
+assertThrows('"\\u111@"');
 assertEquals("\\x1@", /\x1@/.source);
 assertEquals("\\u111@", /\u111@/.source);
 
 // "G" comes just after "F".
-assertEquals("x1G", "\x1G");
-assertEquals("u111G", "\u111G");
+assertThrows('"\\x1G"');
+assertThrows('"\\u111G"');
 assertEquals("\\x1G", /\x1G/.source);
 assertEquals("\\u111G", /\u111G/.source);
 
diff --git a/src/3rdparty/v8/test/mjsunit/compiler/optimize-bitnot.js b/src/3rdparty/v8/test/mjsunit/compiler/optimize-bitnot.js
new file mode 100644 (file)
index 0000000..28315a4
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f(x) {
+  return ~~x;
+}
+
+f(42);
+f(42);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(42, f(42));
+assertEquals(42, f(42.5));
+assertEquals(1/0, 1/f(-0));
+assertEquals(-1, f(0xffffffff));
+assertEquals(0, f(undefined));
+assertEquals(0, f("abc"));
index cf25c0c..efbb2cc 100644 (file)
@@ -148,20 +148,9 @@ function listener(event, exec_state, event_data, data) {
           assertFalse(frame.isConstructCall());
         }
 
-        // When function f is optimized (1 means YES, see runtime.cc) we
-        // expect an optimized frame for f with g1, g2 and g3 inlined.
-        if (%GetOptimizationStatus(f) == 1) {
-          if (i == 1 || i == 2 || i == 3) {
-            assertTrue(frame.isOptimizedFrame());
-            assertTrue(frame.isInlinedFrame());
-            assertEquals(4 - i, frame.inlinedFrameIndex());
-          } else if (i == 4) {
-            assertTrue(frame.isOptimizedFrame());
-            assertFalse(frame.isInlinedFrame());
-          } else {
-            assertFalse(frame.isOptimizedFrame());
-            assertFalse(frame.isInlinedFrame());
-          }
+        if (i > 4) {
+          assertFalse(frame.isOptimizedFrame());
+          assertFalse(frame.isInlinedFrame());
         }
       }
 
index c88a683..9c56a12 100644 (file)
@@ -138,20 +138,9 @@ function listener(event, exec_state, event_data, data) {
           assertFalse(frame.isConstructCall());
         }
 
-        // When function f is optimized (1 means YES, see runtime.cc) we
-        // expect an optimized frame for f with g1, g2 and g3 inlined.
-        if (%GetOptimizationStatus(f) == 1) {
-          if (i == 1 || i == 2 || i == 3) {
-            assertTrue(frame.isOptimizedFrame());
-            assertTrue(frame.isInlinedFrame());
-            assertEquals(4 - i, frame.inlinedFrameIndex());
-          } else if (i == 4) {
-            assertTrue(frame.isOptimizedFrame());
-            assertFalse(frame.isInlinedFrame());
-          } else {
-            assertFalse(frame.isOptimizedFrame());
-            assertFalse(frame.isInlinedFrame());
-          }
+        if (i > 4) {
+          assertFalse(frame.isOptimizedFrame());
+          assertFalse(frame.isInlinedFrame());
         }
       }
 
diff --git a/src/3rdparty/v8/test/mjsunit/debug-function-scopes.js b/src/3rdparty/v8/test/mjsunit/debug-function-scopes.js
new file mode 100644 (file)
index 0000000..4262b95
--- /dev/null
@@ -0,0 +1,162 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
+// Get the Debug object exposed from the debug context global object.
+var Debug = debug.Debug;
+
+function CheckScope(scope_mirror, scope_expectations, expected_scope_type) {
+  assertEquals(expected_scope_type, scope_mirror.scopeType());
+
+  var scope_object = scope_mirror.scopeObject().value();
+
+  for (var name in scope_expectations) {
+    var actual = scope_object[name];
+    var expected = scope_expectations[name];
+    assertEquals(expected, actual);
+  }
+}
+
+// A copy of the scope types from mirror-debugger.js.
+var ScopeType = { Global: 0,
+                  Local: 1,
+                  With: 2,
+                  Closure: 3,
+                  Catch: 4,
+                  Block: 5 };
+
+var f1 = (function F1(x) {
+  function F2(y) {
+    var z = x + y;
+    with ({w: 5, v: "Capybara"}) {
+      var F3 = function(a, b) {
+        function F4(p) {
+          return p + a + b + z + w + v.length;
+        }
+        return F4;
+      }
+      return F3(4, 5);
+    }
+  }
+  return F2(17);
+})(5);
+
+var mirror = Debug.MakeMirror(f1);
+
+assertEquals(5, mirror.scopeCount());
+
+CheckScope(mirror.scope(0), { a: 4, b: 5 }, ScopeType.Closure);
+CheckScope(mirror.scope(1), { w: 5, v: "Capybara" }, ScopeType.With);
+CheckScope(mirror.scope(2), { y: 17, z: 22 }, ScopeType.Closure);
+CheckScope(mirror.scope(3), { x: 5 }, ScopeType.Closure);
+CheckScope(mirror.scope(4), {}, ScopeType.Global);
+
+var f2 = function() { return 5; }
+
+var mirror = Debug.MakeMirror(f2);
+
+assertEquals(1, mirror.scopeCount());
+
+CheckScope(mirror.scope(0), {}, ScopeType.Global);
+
+var f3 = (function F1(invisible_parameter) {
+  var invisible1 = 1;
+  var visible1 = 10;
+  return (function F2() {
+    var invisible2 = 2;
+    return (function F3() {
+      var visible2 = 20;
+      var invisible2 = 3;
+      return (function () {return visible1 + visible2 + visible1a;});
+    })();
+  })();
+})(5);
+
+var mirror = Debug.MakeMirror(f3);
+
+assertEquals(3, mirror.scopeCount());
+
+CheckScope(mirror.scope(0), { visible2: 20 }, ScopeType.Closure);
+CheckScope(mirror.scope(1), { visible1: 10 }, ScopeType.Closure);
+CheckScope(mirror.scope(2), {}, ScopeType.Global);
+
+
+var f4 = (function One() {
+  try {
+    throw "I'm error 1";
+  } catch (e1) {
+    try {
+      throw "I'm error 2";
+    } catch (e2) {
+      return function GetError() {
+        return e1 + e2;
+      };
+    }
+  }
+})();
+
+var mirror = Debug.MakeMirror(f4);
+
+assertEquals(3, mirror.scopeCount());
+
+CheckScope(mirror.scope(0), { e2: "I'm error 2" }, ScopeType.Catch);
+CheckScope(mirror.scope(1), { e1: "I'm error 1" }, ScopeType.Catch);
+CheckScope(mirror.scope(2), {}, ScopeType.Global);
+
+
+var f5 = (function Raz(p1, p2) {
+  var p3 = p1 + p2;
+  return (function() {
+    var p4 = 20;
+    var p5 = 21;
+    var p6 = 22;
+    return eval("(function(p7){return p1 + p4 + p6 + p7})");
+  })();
+})(1,2);
+
+var mirror = Debug.MakeMirror(f5);
+
+assertEquals(3, mirror.scopeCount());
+
+CheckScope(mirror.scope(0), { p4: 20, p6: 22 }, ScopeType.Closure);
+CheckScope(mirror.scope(1), { p1: 1 }, ScopeType.Closure);
+CheckScope(mirror.scope(2), {}, ScopeType.Global);
+
+
+function CheckNoScopeVisible(f) {
+  var mirror = Debug.MakeMirror(f);
+  assertEquals(0, mirror.scopeCount());
+}
+
+CheckNoScopeVisible(Number);
+
+CheckNoScopeVisible(Function.toString);
+
+// This getter is known to be implemented as a closure.
+CheckNoScopeVisible(new Error().__lookupGetter__("stack"));
+
diff --git a/src/3rdparty/v8/test/mjsunit/debug-liveedit-stack-padding.js b/src/3rdparty/v8/test/mjsunit/debug-liveedit-stack-padding.js
new file mode 100644 (file)
index 0000000..36de356
--- /dev/null
@@ -0,0 +1,88 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+
+Debug = debug.Debug;
+
+SlimFunction = eval(
+    "(function() {\n " +
+    "  return 'Cat';\n" +
+    "})\n"
+);
+
+var script = Debug.findScript(SlimFunction);
+
+Debug.setScriptBreakPointById(script.id, 1, 0);
+
+var orig_animal = "'Cat'";
+var patch_pos = script.source.indexOf(orig_animal);
+var new_animal_patch = "'Capybara'";
+
+debugger_handler = (function() {
+  var already_called = false;
+  return function() {
+    if (already_called) {
+      return;
+    }
+    already_called = true;
+
+    var change_log = new Array();
+    try {
+      Debug.LiveEdit.TestApi.ApplySingleChunkPatch(script, patch_pos,
+          orig_animal.length, new_animal_patch, change_log);
+    } finally {
+      print("Change log: " + JSON.stringify(change_log) + "\n");
+    }
+  };
+})();
+
+var saved_exception = null;
+
+function listener(event, exec_state, event_data, data) {
+  if (event == Debug.DebugEvent.Break) {
+    try {
+      debugger_handler();
+    } catch (e) {
+      saved_exception = e;
+    }
+  } else {
+    print("Other: " + event);
+  }
+}
+
+Debug.setListener(listener);
+
+var animal = SlimFunction();
+
+if (saved_exception) {
+  print("Exception: " + saved_exception);
+  assertUnreachable();
+}
+
+assertEquals("Capybara", animal);
index faa732e..e027563 100644 (file)
@@ -78,8 +78,10 @@ function listener(event, exec_state, event_data, data) {
     var response = safeEval(dcp.processDebugJSONRequest(request));
     assertTrue(response.success);
 
-    // Test filtering by id.
-    assertEquals(2, response.body.length);
+    // Test filtering by id.  We have to get at least one script back, but
+    // the exact number depends on the timing of GC.
+    assertTrue(response.body.length >= 1);
+
     var script = response.body[0];
     var request = '{' + base_request + ',"arguments":{"ids":[' +
                   script.id + ']}}';
diff --git a/src/3rdparty/v8/test/mjsunit/debug-stepin-builtin-callback.js b/src/3rdparty/v8/test/mjsunit/debug-stepin-builtin-callback.js
new file mode 100644 (file)
index 0000000..223159d
--- /dev/null
@@ -0,0 +1,157 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
+// Test stepping into callbacks passed to builtin functions.
+
+Debug = debug.Debug
+
+var exception = false;
+
+function array_listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      if (breaks == 0) {
+        exec_state.prepareStep(Debug.StepAction.StepIn, 2);
+        breaks = 1;
+      } else if (breaks <= 3) {
+        breaks++;
+        // Check whether we break at the expected line.
+        print(event_data.sourceLineText());
+        assertTrue(event_data.sourceLineText().indexOf("Expected to step") > 0);
+        exec_state.prepareStep(Debug.StepAction.StepIn, 3);
+      }
+    }
+  } catch (e) {
+    exception = true;
+  }
+};
+
+function cb_false(num) {
+  print("element " + num);  // Expected to step to this point.
+  return false;
+}
+
+function cb_true(num) {
+  print("element " + num);  // Expected to step to this point.
+  return true;
+}
+
+function cb_reduce(a, b) {
+  print("elements " + a + " and " + b);  // Expected to step to this point.
+  return a + b;
+}
+
+var a = [1, 2, 3, 4];
+
+Debug.setListener(array_listener);
+
+var breaks = 0;
+debugger;
+a.forEach(cb_true);
+assertFalse(exception);
+assertEquals(4, breaks);
+
+breaks = 0;
+debugger;
+a.some(cb_false);
+assertFalse(exception);
+assertEquals(4, breaks);
+
+breaks = 0;
+debugger;
+a.every(cb_true);
+assertEquals(4, breaks);
+assertFalse(exception);
+
+breaks = 0;
+debugger;
+a.map(cb_true);
+assertFalse(exception);
+assertEquals(4, breaks);
+
+breaks = 0;
+debugger;
+a.filter(cb_true);
+assertFalse(exception);
+assertEquals(4, breaks);
+
+breaks = 0;
+debugger;
+a.reduce(cb_reduce);
+assertFalse(exception);
+assertEquals(4, breaks);
+
+breaks = 0;
+debugger;
+a.reduceRight(cb_reduce);
+assertFalse(exception);
+assertEquals(4, breaks);
+
+Debug.setListener(null);
+
+
+// Test two levels of builtin callbacks:
+// Array.forEach calls a callback function, which by itself uses
+// Array.forEach with another callback function.
+
+function second_level_listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      if (breaks == 0) {
+        exec_state.prepareStep(Debug.StepAction.StepIn, 3);
+        breaks = 1;
+      } else if (breaks <= 16) {
+        breaks++;
+        // Check whether we break at the expected line.
+        assertTrue(event_data.sourceLineText().indexOf("Expected to step") > 0);
+        // Step two steps further every four breaks to skip the
+        // forEach call in the first level of recursion.
+        var step = (breaks % 4 == 1) ? 6 : 3;
+        exec_state.prepareStep(Debug.StepAction.StepIn, step);
+      }
+    }
+  } catch (e) {
+    exception = true;
+  }
+};
+
+function cb_foreach(num) {
+  a.forEach(cb_true);
+  print("back to the first level of recursion.");
+}
+
+Debug.setListener(second_level_listener);
+
+breaks = 0;
+debugger;
+a.forEach(cb_foreach);
+assertFalse(exception);
+assertEquals(17, breaks);
+
+Debug.setListener(null);
index 93fcb85..20bfe6d 100644 (file)
 // This exercises the code in runtime.cc in
 // DeclareGlobal...Locally().
 
+// Flags: --es52_globals
+
 this.__proto__.foo = 42;
 this.__proto__.bar = 87;
 
-eval("assertEquals(42, foo); var foo = 87;");
+eval("assertEquals(undefined, foo); var foo = 87;");
 assertEquals(87, foo);
 
-eval("assertEquals(87, bar); const bar = 42;");
+eval("assertEquals(undefined, bar); const bar = 42;");
 assertEquals(42, bar);
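Under --es52_globals the eval'ed var/const declarations create own data
properties on the global object instead of reusing values inherited from its
prototype, so the reads before assignment now see undefined rather than 42
and 87:

  // this.__proto__.foo == 42, but 'var foo' hoists an own property onto the
  // global object initialized to undefined, shadowing the inherited value;
  // only the explicit assignment later sets foo to 87.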
index 966a162..107164d 100644 (file)
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-var e = new Error();
-assertFalse(e.hasOwnProperty('message'));
-Error.prototype.toString = Object.prototype.toString;
-assertEquals("[object Error]", Error.prototype.toString());
-assertEquals(Object.prototype, Error.prototype.__proto__);
-
-// Check that error construction does not call setters for the
-// properties on error objects in prototypes.
-function fail() { assertTrue(false); };
-ReferenceError.prototype.__defineSetter__('stack', fail);
-ReferenceError.prototype.__defineSetter__('message', fail);
-ReferenceError.prototype.__defineSetter__('type', fail);
-ReferenceError.prototype.__defineSetter__('arguments', fail);
-var e0 = new ReferenceError();
-var e1 = new ReferenceError('123');
-assertTrue(e1.hasOwnProperty('message'));
-assertTrue(e0.hasOwnProperty('stack'));
-assertTrue(e1.hasOwnProperty('stack'));
-assertTrue(e0.hasOwnProperty('type'));
-assertTrue(e1.hasOwnProperty('type'));
-assertTrue(e0.hasOwnProperty('arguments'));
-assertTrue(e1.hasOwnProperty('arguments'));
-
-// Check that the name property on error prototypes is read-only and
-// dont-delete. This is not specified, but allowing overwriting the
-// name property with a getter can leaks error objects from different
-// script tags in the same context in a browser setting. We therefore
-// disallow changes to the name property on error objects.
-assertEquals("ReferenceError", ReferenceError.prototype.name);
-delete ReferenceError.prototype.name;
-assertEquals("ReferenceError", ReferenceError.prototype.name);
-ReferenceError.prototype.name = "not a reference error";
-assertEquals("ReferenceError", ReferenceError.prototype.name);
+// Flags: --allow-natives-syntax
 
 // Check that message and name are not enumerable on Error objects.
 var desc = Object.getOwnPropertyDescriptor(Error.prototype, 'name');
@@ -75,8 +43,75 @@ assertFalse(desc['enumerable']);
 desc = Object.getOwnPropertyDescriptor(e, 'stack');
 assertFalse(desc['enumerable']);
 
+var e = new Error();
+assertFalse(e.hasOwnProperty('message'));
+
 // name is not tested above, but in addition we should have no enumerable
 // properties, so we simply assert that.
 for (var v in e) {
   assertUnreachable();
 }
+
+// Check that error construction does not call setters for the
+// properties on error objects in prototypes.
+function fail() { assertUnreachable(); };
+ReferenceError.prototype.__defineSetter__('name', fail);
+ReferenceError.prototype.__defineSetter__('message', fail);
+ReferenceError.prototype.__defineSetter__('type', fail);
+ReferenceError.prototype.__defineSetter__('arguments', fail);
+ReferenceError.prototype.__defineSetter__('stack', fail);
+
+var e = new ReferenceError();
+assertTrue(e.hasOwnProperty('stack'));
+assertTrue(e.hasOwnProperty('type'));
+assertTrue(e.hasOwnProperty('arguments'));
+
+var e = new ReferenceError('123');
+assertTrue(e.hasOwnProperty('message'));
+assertTrue(e.hasOwnProperty('stack'));
+assertTrue(e.hasOwnProperty('type'));
+assertTrue(e.hasOwnProperty('arguments'));
+
+var e = %MakeReferenceError("my_test_error", [0, 1]);
+assertTrue(e.hasOwnProperty('stack'));
+assertTrue(e.hasOwnProperty('type'));
+assertTrue(e.hasOwnProperty('arguments'));
+assertEquals("my_test_error", e.type)
+
+// Check that intercepting property access from toString is prevented for
+// compiler errors. This is not specified, but allowing interception
+// through a getter can leak error objects from different
+// script tags in the same context in a browser setting.
+var errors = [SyntaxError, ReferenceError, TypeError];
+for (var i in errors) {
+  var name = errors[i].prototype.toString();
+  // Monkey-patch prototype.
+  var props = ["name", "message", "type", "arguments", "stack"];
+  for (var j in props) {
+    errors[i].prototype.__defineGetter__(props[j], fail);
+  }
+  // String conversion should not invoke monkey-patched getters on prototype.
+  var e = new errors[i];
+  assertEquals(name, e.toString());
+  // Custom getters in actual objects are welcome.
+  e.__defineGetter__("name", function() { return "mine"; });
+  assertEquals("mine", e.toString());
+}
+
+// Monkey-patching non-static errors should still be observable.
+function MyError() {}
+MyError.prototype = new Error;
+var errors = [Error, RangeError, EvalError, URIError, MyError];
+for (var i in errors) {
+  errors[i].prototype.__defineGetter__("name", function() { return "my"; });
+  errors[i].prototype.__defineGetter__("message", function() { return "moo"; });
+  var e = new errors[i];
+  assertEquals("my: moo", e.toString());
+}
+
+
+Error.prototype.toString = Object.prototype.toString;
+assertEquals("[object Error]", Error.prototype.toString());
+assertEquals(Object.prototype, Error.prototype.__proto__);
+var e = new Error("foo");
+assertEquals("[object Error]", e.toString());
diff --git a/src/3rdparty/v8/test/mjsunit/harmony/debug-function-scopes.js b/src/3rdparty/v8/test/mjsunit/harmony/debug-function-scopes.js
new file mode 100644 (file)
index 0000000..0113be6
--- /dev/null
@@ -0,0 +1,115 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug --harmony-scoping
+
+"use strict";
+
+// Get the Debug object exposed from the debug context global object.
+var Debug = debug.Debug;
+
+function CheckScope(scope_mirror, scope_expectations, expected_scope_type) {
+  assertEquals(expected_scope_type, scope_mirror.scopeType());
+
+  var scope_object = scope_mirror.scopeObject().value();
+
+  for (let name in scope_expectations) {
+    let actual = scope_object[name];
+    let expected = scope_expectations[name];
+    assertEquals(expected, actual);
+  }
+}
+
+// A copy of the scope types from mirror-debugger.js.
+var ScopeType = { Global: 0,
+                  Local: 1,
+                  With: 2,
+                  Closure: 3,
+                  Catch: 4,
+                  Block: 5 };
+
+var f1 = (function F1(x) {
+  function F2(y) {
+    var z = x + y;
+    {
+      var w = 5;
+      var v = "Capybara";
+      var F3 = function(a, b) {
+        function F4(p) {
+          return p + a + b + z + w + v.length;
+        }
+        return F4;
+      }
+      return F3(4, 5);
+    }
+  }
+  return F2(17);
+})(5);
+
+var mirror = Debug.MakeMirror(f1);
+
+assertEquals(4, mirror.scopeCount());
+
+CheckScope(mirror.scope(0), { a: 4, b: 5 }, ScopeType.Closure);
+CheckScope(mirror.scope(1), { z: 22, w: 5, v: "Capybara" }, ScopeType.Closure);
+CheckScope(mirror.scope(2), { x: 5 }, ScopeType.Closure);
+CheckScope(mirror.scope(3), {}, ScopeType.Global);
+
+var f2 = (function() {
+  var v1 = 3;
+  var v2 = 4;
+  let l0 = 0;
+  {
+    var v3 = 5;
+    let l1 = 6;
+    let l2 = 7;
+    {
+      var v4 = 8;
+      let l3 = 9;
+      {
+        var v5 = "Cat";
+        let l4 = 11;
+        var v6 = l4;
+        return function() {
+          return l0 + v1 + v3 + l2 + l3 + v6;
+        };
+      }
+    }
+  }
+})();
+
+var mirror = Debug.MakeMirror(f2);
+
+assertEquals(5, mirror.scopeCount());
+
+// Implementation artifact: l4 isn't used in the closure, but it is still saved.
+CheckScope(mirror.scope(0), { l4: 11 }, ScopeType.Block);
+
+CheckScope(mirror.scope(1), { l3: 9 }, ScopeType.Block);
+CheckScope(mirror.scope(2), { l1: 6, l2: 7 }, ScopeType.Block);
+CheckScope(mirror.scope(3), { v1:3, l0: 0, v3: 5, v6: 11 }, ScopeType.Closure);
+CheckScope(mirror.scope(4), {}, ScopeType.Global);
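
The mirror API exercised above also lends itself to a small inspection helper; a sketch under the same --expose-debug-as debug setup (DumpScopes is a hypothetical name, the Debug calls are the ones used by this test):

function DumpScopes(fun) {
  var mirror = Debug.MakeMirror(fun);
  for (var i = 0; i < mirror.scopeCount(); i++) {
    print(i + ": scope type " + mirror.scope(i).scopeType());
  }
}
DumpScopes(f2);  // Per the asserts above: Block, Block, Block, Closure, Global.
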
diff --git a/src/3rdparty/v8/test/mjsunit/harmony/module-linking.js b/src/3rdparty/v8/test/mjsunit/harmony/module-linking.js
new file mode 100644 (file)
index 0000000..13ca6f7
--- /dev/null
@@ -0,0 +1,121 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-modules --harmony-scoping
+
+// Test basic module linking.
+
+"use strict";
+
+let log = "";
+
+export let x = (log += "1");
+
+export module B = A.B
+
+export module A {
+  export let x = (log += "2");
+  let y = (log += "3");
+  export function f() { log += "5" };
+  export module B {
+    module BB = B;
+    export BB, x;
+    let x = (log += "4");
+    f();
+    let y = (log += "6");
+  }
+  export let z = (log += "7");
+  export module C {
+    export let z = (log += "8");
+    export module D = B
+    export module C = A.C
+  }
+  module D {}
+}
+
+export module M1 {
+  export module A2 = M2;
+  export let x = (log += "9");
+}
+export module M2 {
+  export module A1 = M1;
+  export let x = (log += "0");
+}
+
+assertEquals("object", typeof A);
+assertTrue('x' in A);
+assertFalse('y' in A);
+assertTrue('f' in A);
+assertTrue('B' in A);
+assertTrue('z' in A);
+assertTrue('C' in A);
+assertFalse('D' in A);
+
+assertEquals("object", typeof B);
+assertTrue('BB' in B);
+assertTrue('x' in B);
+assertFalse('y' in B);
+
+assertEquals("object", typeof A.B);
+assertTrue('BB' in A.B);
+assertTrue('x' in A.B);
+assertFalse('y' in A.B);
+
+assertEquals("object", typeof A.B.BB);
+assertTrue('BB' in A.B.BB);
+assertTrue('x' in A.B.BB);
+assertFalse('y' in A.B.BB);
+
+assertEquals("object", typeof A.C);
+assertTrue('z' in A.C);
+assertTrue('D' in A.C);
+assertTrue('C' in A.C);
+
+assertEquals("object", typeof M1);
+assertEquals("object", typeof M2);
+assertTrue('A2' in M1);
+assertTrue('A1' in M2);
+assertEquals("object", typeof M1.A2);
+assertEquals("object", typeof M2.A1);
+assertTrue('A1' in M1.A2);
+assertTrue('A2' in M2.A1);
+assertEquals("object", typeof M1.A2.A1);
+assertEquals("object", typeof M2.A1.A2);
+
+assertSame(B, A.B);
+assertSame(B, B.BB);
+assertSame(B, A.C.D);
+assertSame(A.C, A.C.C);
+assertFalse(A.D === A.C.D);
+
+assertSame(M1, M2.A1);
+assertSame(M2, M1.A2);
+assertSame(M1, M1.A2.A1);
+assertSame(M2, M2.A1.A2);
+
+// TODO(rossberg): inner declarations are not executed yet.
+// assertEquals("1234567890", log);
index 93e69e3..cdd0a2e 100644 (file)
@@ -70,7 +70,7 @@ module B {
 
   import i0 from I
   import i1, i2, i3, M from I
-  import i4, i5 from "http://where"
+  //import i4, i5 from "http://where"
 }
 
 module I {
@@ -85,7 +85,7 @@ module D3 = D2
 
 module E1 at "http://where"
 module E2 at "http://where";
-module E3 = E1.F
+module E3 = E1
 
 // Check that ASI does not interfere.
 
@@ -103,11 +103,11 @@ at
 "file://local"
 
 import
-x
+vx
 ,
-y
+vy
 from
-"file://local"
+B
 
 
 module Wrap {
index f9f492c..a1b9917 100644 (file)
@@ -129,7 +129,7 @@ export module M2 {
 
 export module External at "external.js"
 export module External1 = External
-export module ExternalA = External.A
+//export module ExternalA = External.A
 export module InnerExternal {
   export module E at "external.js"
 }
diff --git a/src/3rdparty/v8/test/mjsunit/math-floor-of-div.js b/src/3rdparty/v8/test/mjsunit/math-floor-of-div.js
new file mode 100644 (file)
index 0000000..e917182
--- /dev/null
@@ -0,0 +1,216 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --nouse_inlining
+
+// Use this function as reference. Make sure it is not inlined.
+function div(a, b) {
+  return a / b;
+}
+
+var limit = 0x1000000;
+var exhaustive_limit = 100;
+var step = 10;
+var values = [0x10000001,
+              0x12345678,
+              -0x789abcdf,  // 0x87654321
+              0x01234567,
+              0x76543210,
+              -0x80000000,  // 0x80000000
+              0x7fffffff,
+              -0x0fffffff,  // 0xf0000001
+              0x00000010,
+              -0x01000000   // 0xff000000
+              ];
+
+function test_div() {
+  var c = 0;
+  for (var k = 0; k <= limit; k++) {
+    if (k > exhaustive_limit) { c += step; k += c; }
+    assertEquals(Math.floor(div(k,   1)), Math.floor(k /   1));
+    assertEquals(Math.floor(div(k,   -1)), Math.floor(k /   -1));
+    assertEquals(Math.floor(div(k,   2)), Math.floor(k /   2));
+    assertEquals(Math.floor(div(k,   -2)), Math.floor(k /   -2));
+    assertEquals(Math.floor(div(k,   3)), Math.floor(k /   3));
+    assertEquals(Math.floor(div(k,   -3)), Math.floor(k /   -3));
+    assertEquals(Math.floor(div(k,   4)), Math.floor(k /   4));
+    assertEquals(Math.floor(div(k,   -4)), Math.floor(k /   -4));
+    assertEquals(Math.floor(div(k,   5)), Math.floor(k /   5));
+    assertEquals(Math.floor(div(k,   -5)), Math.floor(k /   -5));
+    assertEquals(Math.floor(div(k,   6)), Math.floor(k /   6));
+    assertEquals(Math.floor(div(k,   -6)), Math.floor(k /   -6));
+    assertEquals(Math.floor(div(k,   7)), Math.floor(k /   7));
+    assertEquals(Math.floor(div(k,   -7)), Math.floor(k /   -7));
+    assertEquals(Math.floor(div(k,   8)), Math.floor(k /   8));
+    assertEquals(Math.floor(div(k,   -8)), Math.floor(k /   -8));
+    assertEquals(Math.floor(div(k,   9)), Math.floor(k /   9));
+    assertEquals(Math.floor(div(k,   -9)), Math.floor(k /   -9));
+    assertEquals(Math.floor(div(k,  10)), Math.floor(k /  10));
+    assertEquals(Math.floor(div(k,  -10)), Math.floor(k /  -10));
+    assertEquals(Math.floor(div(k,  11)), Math.floor(k /  11));
+    assertEquals(Math.floor(div(k,  -11)), Math.floor(k /  -11));
+    assertEquals(Math.floor(div(k,  12)), Math.floor(k /  12));
+    assertEquals(Math.floor(div(k,  -12)), Math.floor(k /  -12));
+    assertEquals(Math.floor(div(k,  13)), Math.floor(k /  13));
+    assertEquals(Math.floor(div(k,  -13)), Math.floor(k /  -13));
+    assertEquals(Math.floor(div(k,  14)), Math.floor(k /  14));
+    assertEquals(Math.floor(div(k,  -14)), Math.floor(k /  -14));
+    assertEquals(Math.floor(div(k,  15)), Math.floor(k /  15));
+    assertEquals(Math.floor(div(k,  -15)), Math.floor(k /  -15));
+    assertEquals(Math.floor(div(k,  16)), Math.floor(k /  16));
+    assertEquals(Math.floor(div(k,  -16)), Math.floor(k /  -16));
+    assertEquals(Math.floor(div(k,  17)), Math.floor(k /  17));
+    assertEquals(Math.floor(div(k,  -17)), Math.floor(k /  -17));
+    assertEquals(Math.floor(div(k,  18)), Math.floor(k /  18));
+    assertEquals(Math.floor(div(k,  -18)), Math.floor(k /  -18));
+    assertEquals(Math.floor(div(k,  19)), Math.floor(k /  19));
+    assertEquals(Math.floor(div(k,  -19)), Math.floor(k /  -19));
+    assertEquals(Math.floor(div(k,  20)), Math.floor(k /  20));
+    assertEquals(Math.floor(div(k,  -20)), Math.floor(k /  -20));
+    assertEquals(Math.floor(div(k,  21)), Math.floor(k /  21));
+    assertEquals(Math.floor(div(k,  -21)), Math.floor(k /  -21));
+    assertEquals(Math.floor(div(k,  22)), Math.floor(k /  22));
+    assertEquals(Math.floor(div(k,  -22)), Math.floor(k /  -22));
+    assertEquals(Math.floor(div(k,  23)), Math.floor(k /  23));
+    assertEquals(Math.floor(div(k,  -23)), Math.floor(k /  -23));
+    assertEquals(Math.floor(div(k,  24)), Math.floor(k /  24));
+    assertEquals(Math.floor(div(k,  -24)), Math.floor(k /  -24));
+    assertEquals(Math.floor(div(k,  25)), Math.floor(k /  25));
+    assertEquals(Math.floor(div(k,  -25)), Math.floor(k /  -25));
+    assertEquals(Math.floor(div(k, 125)), Math.floor(k / 125));
+    assertEquals(Math.floor(div(k, -125)), Math.floor(k / -125));
+    assertEquals(Math.floor(div(k, 625)), Math.floor(k / 625));
+    assertEquals(Math.floor(div(k, -625)), Math.floor(k / -625));
+  }
+  c = 0;
+  for (var k = 0; k <= limit; k++) {
+    if (k > exhaustive_limit) { c += step; k += c; }
+    assertEquals(Math.floor(div(-k,   1)), Math.floor(-k /   1));
+    assertEquals(Math.floor(div(-k,   -1)), Math.floor(-k /   -1));
+    assertEquals(Math.floor(div(-k,   2)), Math.floor(-k /   2));
+    assertEquals(Math.floor(div(-k,   -2)), Math.floor(-k /   -2));
+    assertEquals(Math.floor(div(-k,   3)), Math.floor(-k /   3));
+    assertEquals(Math.floor(div(-k,   -3)), Math.floor(-k /   -3));
+    assertEquals(Math.floor(div(-k,   4)), Math.floor(-k /   4));
+    assertEquals(Math.floor(div(-k,   -4)), Math.floor(-k /   -4));
+    assertEquals(Math.floor(div(-k,   5)), Math.floor(-k /   5));
+    assertEquals(Math.floor(div(-k,   -5)), Math.floor(-k /   -5));
+    assertEquals(Math.floor(div(-k,   6)), Math.floor(-k /   6));
+    assertEquals(Math.floor(div(-k,   -6)), Math.floor(-k /   -6));
+    assertEquals(Math.floor(div(-k,   7)), Math.floor(-k /   7));
+    assertEquals(Math.floor(div(-k,   -7)), Math.floor(-k /   -7));
+    assertEquals(Math.floor(div(-k,   8)), Math.floor(-k /   8));
+    assertEquals(Math.floor(div(-k,   -8)), Math.floor(-k /   -8));
+    assertEquals(Math.floor(div(-k,   9)), Math.floor(-k /   9));
+    assertEquals(Math.floor(div(-k,   -9)), Math.floor(-k /   -9));
+    assertEquals(Math.floor(div(-k,  10)), Math.floor(-k /  10));
+    assertEquals(Math.floor(div(-k,  -10)), Math.floor(-k /  -10));
+    assertEquals(Math.floor(div(-k,  11)), Math.floor(-k /  11));
+    assertEquals(Math.floor(div(-k,  -11)), Math.floor(-k /  -11));
+    assertEquals(Math.floor(div(-k,  12)), Math.floor(-k /  12));
+    assertEquals(Math.floor(div(-k,  -12)), Math.floor(-k /  -12));
+    assertEquals(Math.floor(div(-k,  13)), Math.floor(-k /  13));
+    assertEquals(Math.floor(div(-k,  -13)), Math.floor(-k /  -13));
+    assertEquals(Math.floor(div(-k,  14)), Math.floor(-k /  14));
+    assertEquals(Math.floor(div(-k,  -14)), Math.floor(-k /  -14));
+    assertEquals(Math.floor(div(-k,  15)), Math.floor(-k /  15));
+    assertEquals(Math.floor(div(-k,  -15)), Math.floor(-k /  -15));
+    assertEquals(Math.floor(div(-k,  16)), Math.floor(-k /  16));
+    assertEquals(Math.floor(div(-k,  -16)), Math.floor(-k /  -16));
+    assertEquals(Math.floor(div(-k,  17)), Math.floor(-k /  17));
+    assertEquals(Math.floor(div(-k,  -17)), Math.floor(-k /  -17));
+    assertEquals(Math.floor(div(-k,  18)), Math.floor(-k /  18));
+    assertEquals(Math.floor(div(-k,  -18)), Math.floor(-k /  -18));
+    assertEquals(Math.floor(div(-k,  19)), Math.floor(-k /  19));
+    assertEquals(Math.floor(div(-k,  -19)), Math.floor(-k /  -19));
+    assertEquals(Math.floor(div(-k,  20)), Math.floor(-k /  20));
+    assertEquals(Math.floor(div(-k,  -20)), Math.floor(-k /  -20));
+    assertEquals(Math.floor(div(-k,  21)), Math.floor(-k /  21));
+    assertEquals(Math.floor(div(-k,  -21)), Math.floor(-k /  -21));
+    assertEquals(Math.floor(div(-k,  22)), Math.floor(-k /  22));
+    assertEquals(Math.floor(div(-k,  -22)), Math.floor(-k /  -22));
+    assertEquals(Math.floor(div(-k,  23)), Math.floor(-k /  23));
+    assertEquals(Math.floor(div(-k,  -23)), Math.floor(-k /  -23));
+    assertEquals(Math.floor(div(-k,  24)), Math.floor(-k /  24));
+    assertEquals(Math.floor(div(-k,  -24)), Math.floor(-k /  -24));
+    assertEquals(Math.floor(div(-k,  25)), Math.floor(-k /  25));
+    assertEquals(Math.floor(div(-k,  -25)), Math.floor(-k /  -25));
+    assertEquals(Math.floor(div(-k, 125)), Math.floor(-k / 125));
+    assertEquals(Math.floor(div(-k, -125)), Math.floor(-k / -125));
+    assertEquals(Math.floor(div(-k, 625)), Math.floor(-k / 625));
+    assertEquals(Math.floor(div(-k, -625)), Math.floor(-k / -625));
+  }
+  // Test for edge cases.
+  // Use (values[key] | 0) to force the integer type.
+  for (var i = 0; i < values.length; i++) {
+    for (var j = 0; j < values.length; j++) {
+      assertEquals(Math.floor(div((values[i] | 0), (values[j] | 0))),
+                   Math.floor((values[i] | 0) / (values[j] | 0)));
+      assertEquals(Math.floor(div(-(values[i] | 0), (values[j] | 0))),
+                   Math.floor(-(values[i] | 0) / (values[j] | 0)));
+      assertEquals(Math.floor(div((values[i] | 0), -(values[j] | 0))),
+                   Math.floor((values[i] | 0) / -(values[j] | 0)));
+      assertEquals(Math.floor(div(-(values[i] | 0), -(values[j] | 0))),
+                   Math.floor(-(values[i] | 0) / -(values[j] | 0)));
+    }
+  }
+}
+
+test_div();
+%OptimizeFunctionOnNextCall(test_div);
+test_div();
+
+// Test for negative zero and overflow.
+// Separate the tests to prevent deoptimizations from making the other optimized
+// test unreachable.
+
+function IsNegativeZero(x) {
+  assertTrue(x == 0);  // Is 0 or -0.
+  var y = 1 / x;
+  assertFalse(isFinite(y));
+  return y < 0;
+}
+
+function test_div_deopt_minus_zero() {
+  var zero_in_array = [0];
+  assertTrue(IsNegativeZero(Math.floor((zero_in_array[0] | 0) / -1)));
+}
+
+function test_div_deopt_overflow() {
+  // We box the value in an array to avoid constant propagation.
+  var min_int_in_array = [-2147483648];
+  // We use '| 0' to force the representation to int32.
+  assertEquals(-min_int_in_array[0],
+               Math.floor((min_int_in_array[0] | 0) / -1));
+}
+
+test_div_deopt_minus_zero();
+test_div_deopt_overflow();
+%OptimizeFunctionOnNextCall(test_div_deopt_minus_zero);
+%OptimizeFunctionOnNextCall(test_div_deopt_overflow);
+test_div_deopt_minus_zero();
+test_div_deopt_overflow();
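
As a reminder of why the reference implementation goes through Math.floor rather than a truncating cast: flooring and truncating division agree only when the operands have the same sign. A two-line illustration:

assertEquals(-3, Math.floor(-5 / 2));  // floors toward -Infinity
assertEquals(-2, (-5 / 2) | 0);        // '| 0' truncates toward zero
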
index 033c78f..65fb301 100644 (file)
@@ -75,7 +75,7 @@ var assertTrue;
 // Checks that the found value is false.
 var assertFalse;
 
-// Checks that the found value is null. Kept for historical compatability,
+// Checks that the found value is null. Kept for historical compatibility,
 // please just use assertEquals(null, expected).
 var assertNull;
 
index a1b9270..ab5f2e3 100644 (file)
@@ -64,6 +64,7 @@ regress/regress-524: (PASS || TIMEOUT), SKIP if $mode == debug
 # Stack manipulations in LiveEdit are buggy - see bug 915
 debug-liveedit-check-stack: SKIP
 debug-liveedit-patch-positions-replace: SKIP
+debug-liveedit-stack-padding: SKIP
 
 # Test Crankshaft compilation time.  Expected to take too long in debug mode.
 regress/regress-1969: PASS, SKIP if $mode == debug
diff --git a/src/3rdparty/v8/test/mjsunit/regexp-capture-3.js b/src/3rdparty/v8/test/mjsunit/regexp-capture-3.js
new file mode 100644 (file)
index 0000000..9bdd600
--- /dev/null
@@ -0,0 +1,218 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function oneMatch(re) {
+  "abcd".replace(re, function() { });
+  assertEquals("abcd", RegExp.input);
+  assertEquals("a", RegExp.leftContext);
+  assertEquals("b", RegExp.lastMatch);
+  assertEquals("", RegExp.lastParen);
+  assertEquals(undefined, RegExp.lastIndex);
+  assertEquals(undefined, RegExp.index);
+  assertEquals("cd", RegExp.rightContext);
+  for (var i = 1; i < 10; i++) {
+    assertEquals("", RegExp['$' + i]);
+  }
+}
+
+oneMatch(/b/);
+oneMatch(/b/g);
+
+"abcdabcd".replace(/b/g, function() { });
+assertEquals("abcdabcd", RegExp.input);
+assertEquals("abcda", RegExp.leftContext);
+assertEquals("b", RegExp.lastMatch);
+assertEquals("", RegExp.lastParen);
+assertEquals(undefined, RegExp.lastIndex);
+assertEquals(undefined, RegExp.index);
+assertEquals("cd", RegExp.rightContext);
+for (var i = 1; i < 10; i++) {
+  assertEquals("", RegExp['$' + i]);
+}
+
+function captureMatch(re) {
+  "abcd".replace(re, function() { });
+  assertEquals("abcd", RegExp.input);
+  assertEquals("a", RegExp.leftContext);
+  assertEquals("bc", RegExp.lastMatch);
+  assertEquals("c", RegExp.lastParen);
+  assertEquals(undefined, RegExp.lastIndex);
+  assertEquals(undefined, RegExp.index);
+  assertEquals("d", RegExp.rightContext);
+  assertEquals('b', RegExp.$1);
+  assertEquals('c', RegExp.$2);
+  for (var i = 3; i < 10; i++) {
+    assertEquals("", RegExp['$' + i]);
+  }
+}
+
+captureMatch(/(b)(c)/);
+captureMatch(/(b)(c)/g);
+
+"abcdabcd".replace(/(b)(c)/g, function() { });
+assertEquals("abcdabcd", RegExp.input);
+assertEquals("abcda", RegExp.leftContext);
+assertEquals("bc", RegExp.lastMatch);
+assertEquals("c", RegExp.lastParen);
+assertEquals(undefined, RegExp.lastIndex);
+assertEquals(undefined, RegExp.index);
+assertEquals("d", RegExp.rightContext);
+assertEquals('b', RegExp.$1);
+assertEquals('c', RegExp.$2);
+for (var i = 3; i < 10; i++) {
+  assertEquals("", RegExp['$' + i]);
+}
+
+
+function Override() {
+  // Set the internal lastMatchInfoOverride.  After calling this we do a normal
+  // match and verify the override was cleared and that we record the new
+  // captures.
+  "abcdabcd".replace(/(b)(c)/g, function() { });
+}
+
+
+function TestOverride(input, expect, property, re_src) {
+  var re = new RegExp(re_src);
+  var re_g = new RegExp(re_src, "g");
+
+  function OverrideCase(fn) {
+    Override();
+    fn();
+    assertEquals(expect, RegExp[property]);
+  }
+
+  OverrideCase(function() { return input.replace(re, "x"); });
+  OverrideCase(function() { return input.replace(re_g, "x"); });
+  OverrideCase(function() { return input.replace(re, ""); });
+  OverrideCase(function() { return input.replace(re_g, ""); });
+  OverrideCase(function() { return input.match(re); });
+  OverrideCase(function() { return input.match(re_g); });
+  OverrideCase(function() { return re.test(input); });
+  OverrideCase(function() { return re_g.test(input); });
+}
+
+var input = "bar.foo baz......";
+var re_str = "(ba.).*?f";
+TestOverride(input, "bar", "$1", re_str);
+
+input = "foo bar baz";
+var re_str = "bar";
+TestOverride(input, "bar", "$&", re_str);
+
+
+function no_last_match(fn) {
+  fn();
+  assertEquals("hestfisk", RegExp.$1);
+}
+
+/(hestfisk)/.test("There's no such thing as a hestfisk!");
+
+no_last_match(function() { "foo".replace("f", ""); });
+no_last_match(function() { "foo".replace("f", "f"); });
+no_last_match(function() { "foo".split("o"); });
+
+var base = "In the music.  In the music.  ";
+var cons = base + base + base + base;
+no_last_match(function() { cons.replace("x", "y"); });
+no_last_match(function() { cons.replace("e", "E"); });
+
+
+// Here's one that matches once, then tries to match again, but fails.
+// Verify that the last match info is from the last match, not from the
+// failure that came after.
+"bar.foo baz......".replace(/(ba.).*?f/g, function() { return "x";});
+assertEquals("bar", RegExp.$1);
+
+
+// A test that initially does a zero width match, but later does a non-zero
+// width match.
+var a = "foo bar baz".replace(/^|bar/g, "");
+assertEquals("foo  baz", a);
+
+a = "foo bar baz".replace(/^|bar/g, "*");
+assertEquals("*foo * baz", a);
+
+// We test FilterASCII using regexps that will backtrack forever.  Since
+// a regexp with a non-ASCII character in it can never match an ASCII
+// string we can test that the relevant node is removed by verifying that
+// there is no hang.
+function NoHang(re) {
+  print(re);
+  "This is an ASCII string that could take forever".match(re);
+}
+
+
+NoHang(/(((.*)*)*x)å/);  // Continuation after loop is filtered, so is loop.
+NoHang(/(((.*)*)*å)foo/);  // Body of loop filtered.
+NoHang(/å(((.*)*)*x)/);   // Everything after a filtered character is filtered.
+NoHang(/(((.*)*)*x)å/);   // Everything before a filtered character is filtered.
+NoHang(/[æøå](((.*)*)*x)/);   // Everything after a filtered class is filtered.
+NoHang(/(((.*)*)*x)[æøå]/);   // Everything before a filtered class is filtered.
+NoHang(/[^\x00-\x7f](((.*)*)*x)/);   // After negated class.
+NoHang(/(((.*)*)*x)[^\x00-\x7f]/);   // Before negated class.
+NoHang(/(?!(((.*)*)*x)å)foo/);  // Negative lookahead is filtered.
+NoHang(/(?!(((.*)*)*x))å/);  // Continuation branch of negative lookahead.
+NoHang(/(?=(((.*)*)*x)å)foo/);  // Positive lookahead is filtered.
+NoHang(/(?=(((.*)*)*x))å/);  // Continuation branch of positive lookahead.
+NoHang(/(?=å)(((.*)*)*x)/);  // Positive lookahead also prunes continuation.
+NoHang(/(æ|ø|å)(((.*)*)*x)/);  // All branches of alternation are filtered.
+NoHang(/(a|b|(((.*)*)*x))å/);  // 1 out of 3 branches pruned.
+NoHang(/(a|(((.*)*)*x)ø|(((.*)*)*x)å)/);  // 2 out of 3 branches pruned.
+
+var s = "Don't prune based on a repetition of length 0";
+assertEquals(null, s.match(/å{1,1}prune/));
+assertEquals("prune", (s.match(/å{0,0}prune/)[0]));
+
+// Some very deep regexps where FilterASCII gives up in order not to make the
+// stack overflow.
+var regex6 = /a*\u0100*\w/;
+var input0 = "a";
+regex6.exec(input0);
+
+var re = "\u0100*\\w";
+
+for (var i = 0; i < 200; i++) re = "a*" + re;
+
+var regex7 = new RegExp(re);
+regex7.exec(input0);
+
+var regex8 = new RegExp(re, "i");
+regex8.exec(input0);
+
+re = "[\u0100]*\\w";
+for (var i = 0; i < 200; i++) re = "a*" + re;
+
+var regex9 = new RegExp(re);
+regex9.exec(input0);
+
+var regex10 = new RegExp(re, "i");
+regex10.exec(input0);
+
+var regex11 = /^(?:[^\u0000-\u0080]|[0-9a-z?,.!&\s#()])+$/i;
+regex11.exec(input0);
+
index 16b2e4f..5fd8f36 100644 (file)
 // Test runtime declaration of properties with var which are intercepted
 // by JS accessors.
 
-__proto__.__defineSetter__("x", function() { hasBeenInvoked = true; });
-__proto__.__defineSetter__("y", function() { throw 'exception'; });
+// Flags: --es52_globals
+
+this.__defineSetter__("x", function() { hasBeenInvoked = true; });
+this.__defineSetter__("y", function() { throw 'exception'; });
 
 var hasBeenInvoked = false;
 eval("try { } catch (e) { var x = false; }");
 assertTrue(hasBeenInvoked);
 
-var exception;
+// This has to run in global scope, so cannot use assertThrows...
 try {
   eval("try { } catch (e) { var y = false; }");
+  assertUnreachable();
 } catch (e) {
-  exception = e;
+  assertEquals('exception', e);
 }
-assertEquals('exception', exception);
index 7e424ed..dc71158 100644 (file)
 
 // Test that a function declaration cannot overwrite a read-only property.
 
-print(0)
+// Flags: --es52_globals
+
 function foobl() {}
 assertTrue(typeof this.foobl == "function");
 assertTrue(Object.getOwnPropertyDescriptor(this, "foobl").writable);
 
-print(1)
 Object.defineProperty(this, "foobl", {value: 1, writable: false});
 assertSame(1, this.foobl);
 assertFalse(Object.getOwnPropertyDescriptor(this, "foobl").writable);
 
-print(2)
-eval("function foobl() {}");
+// This has to run in global scope, so cannot use assertThrows...
+try {
+  eval("function foobl() {}");  // Should throw.
+  assertUnreachable();
+} catch (e) {
+  assertInstanceof(e, TypeError);
+}
 assertSame(1, this.foobl);
-assertFalse(Object.getOwnPropertyDescriptor(this, "foobl").writable);
-
-print(3)
-eval("function foobl() {}");
-assertSame(1, this.foobl);
-assertFalse(Object.getOwnPropertyDescriptor(this, "foobl").writable);
index 66ed9f2..8c5f6f8 100644 (file)
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Flags: --es52_globals
+
 var setter_value = 0;
 
-__proto__.__defineSetter__("a", function(v) { setter_value = v; });
+this.__defineSetter__("a", function(v) { setter_value = v; });
 eval("var a = 1");
 assertEquals(1, setter_value);
-assertFalse(this.hasOwnProperty("a"));
+assertFalse("value" in Object.getOwnPropertyDescriptor(this, "a"));
 
 eval("with({}) { eval('var a = 2') }");
 assertEquals(2, setter_value);
-assertFalse(this.hasOwnProperty("a"));
+assertFalse("value" in Object.getOwnPropertyDescriptor(this, "a"));
 
 // Function declarations are treated specially to match Safari. We do
 // not call setters for them.
+this.__defineSetter__("a", function(v) { assertUnreachable(); });
 eval("function a() {}");
-assertTrue(this.hasOwnProperty("a"));
+assertTrue("value" in Object.getOwnPropertyDescriptor(this, "a"));
 
-__proto__.__defineSetter__("b", function(v) { assertUnreachable(); });
-var exception = false;
+this.__defineSetter__("b", function(v) { setter_value = v; });
 try {
-  eval("const b = 23");
+  eval("const b = 3");
 } catch(e) {
-  exception = true;
-  assertTrue(/TypeError/.test(e));
+  assertUnreachable();
 }
-assertFalse(exception);
+assertEquals(3, setter_value);
 
-exception = false;
 try {
   eval("with({}) { eval('const b = 23') }");
 } catch(e) {
-  exception = true;
-  assertTrue(/TypeError/.test(e));
+  assertInstanceof(e, TypeError);
 }
-assertTrue(exception);
 
-__proto__.__defineSetter__("c", function(v) { throw 42; });
-exception = false;
+this.__defineSetter__("c", function(v) { throw 42; });
 try {
   eval("var c = 1");
+  assertUnreachable();
 } catch(e) {
-  exception = true;
   assertEquals(42, e);
-  assertFalse(this.hasOwnProperty("c"));
+  assertFalse("value" in Object.getOwnPropertyDescriptor(this, "c"));
+}
+
+
+
+
+__proto__.__defineSetter__("aa", function(v) { assertUnreachable(); });
+eval("var aa = 1");
+assertTrue(this.hasOwnProperty("aa"));
+
+__proto__.__defineSetter__("bb", function(v) { assertUnreachable(); });
+eval("with({}) { eval('var bb = 2') }");
+assertTrue(this.hasOwnProperty("bb"));
+
+// Function declarations are treated specially to match Safari. We do
+// not call setters for them.
+__proto__.__defineSetter__("cc", function(v) { assertUnreachable(); });
+eval("function cc() {}");
+assertTrue(this.hasOwnProperty("cc"));
+
+__proto__.__defineSetter__("dd", function(v) { assertUnreachable(); });
+try {
+  eval("const dd = 23");
+} catch(e) {
+  assertUnreachable();
+}
+
+try {
+  eval("with({}) { eval('const dd = 23') }");
+} catch(e) {
+  assertInstanceof(e, TypeError);
 }
-assertTrue(exception);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-117409.js b/src/3rdparty/v8/test/mjsunit/regress/regress-117409.js
new file mode 100644 (file)
index 0000000..9222191
--- /dev/null
@@ -0,0 +1,52 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc
+
+function KeyedStoreIC(a) { a[0] = Math.E; }
+
+// Create literal with a fast double elements backing store
+var literal = [1.2];
+
+// Specialize the IC for fast double elements
+KeyedStoreIC(literal);
+KeyedStoreIC(literal);
+
+// Truncate the array to 0 elements, at which point the backing store will be
+// replaced with an empty fixed array.
+literal.length = 0;
+
+// The ArrayPush built-in will replace the empty fixed array backing store
+// with a 19-element fixed array backing store.  This leads to a mismatch
+// between the map and the backing store.  Debug mode will crash here in the
+// set_elements accessor.
+literal.push(Math.E, Math.E);
+
+// Corrupt the backing store!
+KeyedStoreIC(literal);
+
+// Release mode will crash here when trying to visit parts of E as pointers.
+gc();
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-119609.js b/src/3rdparty/v8/test/mjsunit/regress/regress-119609.js
new file mode 100644 (file)
index 0000000..99041ad
--- /dev/null
@@ -0,0 +1,71 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug;
+
+var exception = false;
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      function lookup(name) {
+        return exec_state.frame(0).evaluate(name).value();
+      }
+
+      assertEquals(3, lookup("e"));
+      assertEquals(4, lookup("f"));
+      assertEquals(1, lookup("a"));
+
+      try {
+        assertEquals(2, lookup("b"));
+      } catch (e) {
+        assertEquals("ReferenceError: b is not defined", e.toString());
+      }
+    }
+  } catch (e) {
+    exception = e.toString() + e.stack;
+  }
+}
+
+Debug.setListener(listener);
+
+function f(a, b) {
+  var c = 3;
+  function d(e, f) {
+    var g = a;
+    var h = c;
+    debugger;
+  }
+
+  return d;
+}
+
+f(1, 2)(3, 4);
+
+assertFalse(exception);
index 6530549..e00d537 100644 (file)
@@ -30,7 +30,7 @@
 var proto = RegExp.prototype;
 assertEquals("[object RegExp]", Object.prototype.toString.call(proto));
 
-assertEquals("", proto.source);
+assertEquals("(?:)", proto.source);
 assertEquals(false, proto.global);
 assertEquals(false, proto.multiline);
 assertEquals(false, proto.ignoreCase);
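
Returning "(?:)" rather than the empty string keeps the source round-trippable: wrapping an empty source in slashes would yield "//", which parses as a line comment, not a regexp literal. A quick check, assuming the updated behavior above:

var src = RegExp.prototype.source;           // "(?:)" after this change
assertEquals("(?:)", src);
assertTrue(eval("/" + src + "/").test(""));  // "//" would be a comment instead
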
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-123512.js b/src/3rdparty/v8/test/mjsunit/regress/regress-123512.js
new file mode 100644 (file)
index 0000000..8a747bc
--- /dev/null
@@ -0,0 +1,78 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Test that boilerplate objects for array literals with non-constant
+// elements (which will contain the hole at non-constant positions) will
+// not cause prototype chain lookups when generating optimized code.
+
+function f(x) {
+  return [x][0];
+}
+
+// Test data element on prototype.
+Object.prototype[0] = 23;
+assertSame(1, f(1));
+assertSame(2, f(2));
+%OptimizeFunctionOnNextCall(f);
+assertSame(3, f(3));
+%DeoptimizeFunction(f);
+
+// Test accessor element on prototype.
+Object.prototype.__defineGetter__(0, function() { throw Error(); });
+assertSame(4, f(4));
+assertSame(5, f(5));
+%OptimizeFunctionOnNextCall(f);
+assertSame(6, f(6));
+%DeoptimizeFunction(f);
+
+// Test the same on boilerplate objects for object literals that contain
+// both non-constant properties and non-constant elements.
+
+function g(x, y) {
+  var o = { foo:x, 0:y };
+  return o.foo + o[0];
+}
+
+// Test data property and element on prototype.
+Object.prototype[0] = 23;
+Object.prototype.foo = 42;
+assertSame(3, g(1, 2));
+assertSame(5, g(2, 3));
+%OptimizeFunctionOnNextCall(g);
+assertSame(7, g(3, 4));
+%DeoptimizeFunction(g);
+
+// Test accessor property and element on prototype.
+Object.prototype.__defineGetter__(0, function() { throw Error(); });
+Object.prototype.__defineGetter__('foo', function() { throw Error(); });
+assertSame(3, g(1, 2));
+assertSame(5, g(2, 3));
+%OptimizeFunctionOnNextCall(g);
+assertSame(7, g(3, 4));
+%DeoptimizeFunction(g);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-123919.js b/src/3rdparty/v8/test/mjsunit/regress/regress-123919.js
new file mode 100644 (file)
index 0000000..be34608
--- /dev/null
@@ -0,0 +1,47 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --gc-global
+
+function g(max,val) {
+  this.x = 0;
+  for (var i = 0; i < max; i++) {
+    this.x = i/100;
+  }
+  this.val = val;
+}
+
+function f(max) {
+  var val = 0.5;
+  var obj = new g(max,val);
+  assertSame(val, obj.val);
+}
+
+f(1);
+f(1);
+%OptimizeFunctionOnNextCall(f);
+f(200000);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-124594.js b/src/3rdparty/v8/test/mjsunit/regress/regress-124594.js
new file mode 100644 (file)
index 0000000..d51e1f6
--- /dev/null
@@ -0,0 +1,50 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+// Test that a GC inside a constructor frame is correctly handled right
+// after we deoptimize from an inlined constructor to a constructor stub
+// stack frame.
+
+function f(deopt) {
+  var x = 1;
+  if (deopt) {
+    x = x + "foo";
+    gc();
+  }
+  this.x = x;
+}
+
+function g(deopt) {
+  return new f(deopt);
+}
+
+assertEquals({x:1}, g(false));
+assertEquals({x:1}, g(false));
+%OptimizeFunctionOnNextCall(g);
+assertEquals({x:"1foo"}, g(true));
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-125515.js b/src/3rdparty/v8/test/mjsunit/regress/regress-125515.js
new file mode 100644 (file)
index 0000000..91650ce
--- /dev/null
@@ -0,0 +1,41 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc --debug-code
+
+function test(a) {
+  a[0] = 1.5;
+  assertEquals(0, a.length = 0);
+}
+a = new Array();
+test(a);
+test(a);
+// Make sure that a ends up in old space.
+gc();
+gc();
+test(a);
+test(a);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-126412.js b/src/3rdparty/v8/test/mjsunit/regress/regress-126412.js
new file mode 100644 (file)
index 0000000..0677f70
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"".match(/(A{9999999999}B|C*)*D/);
+"C".match(/(A{9999999999}B|C*)*D/);
+"".match(/(A{9999999999}B|C*)*/ );
+"C".match(/(A{9999999999}B|C*)*/ );
+"".match(/(9u|(2\`shj{2147483649,}\r|3|f|y|3*)+8\B)\W93+/);
+"9u8 ".match(/(9u|(2\`shj{2147483649,}\r|3|f|y|3*)+8\B)\W93+/);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-128018.js b/src/3rdparty/v8/test/mjsunit/regress/regress-128018.js
new file mode 100644 (file)
index 0000000..7bd1585
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc
+
+function KeyedStoreIC(a) { a[(1)] = Math.E; }
+var literal = [1.2];
+literal.length = 0;
+literal.push('0' && 0 );
+KeyedStoreIC(literal);
+gc();
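
The sequence above funnels an array through several element representations before the keyed store runs. A short sketch of why one store site can observe multiple representations (the kind names in the comments are informal V8 terms, not part of the test):

    function store(a, v) { a[0] = v; }  // a single keyed-store site
    store([1, 2, 3], 4);        // array holds small integers
    store([1.5, 2.5], 3.5);     // array holds unboxed doubles
    store([{}], "str");         // array holds arbitrary objects
    // The inline cache behind the store site must re-specialize for each.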
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-128146.js b/src/3rdparty/v8/test/mjsunit/regress/regress-128146.js
new file mode 100644 (file)
index 0000000..5c22b4e
--- /dev/null
@@ -0,0 +1,38 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Object.defineProperty({},"foo",{set:function(){},configurable:false});
+Object.defineProperty({},"foo",{get:function(){},configurable:false});
+Object.defineProperty({},"foo",{});
+
+// From WebKit layout tests (fast/js/prototypes.html)
+var wasSet = false;
+var o = { };
+o.__defineGetter__("__proto__", function() { wasSet = true });
+o.__proto__;
+assertFalse(wasSet);
+
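
The three defineProperty calls each target a fresh object, so no descriptor conflict arises; the crash being guarded against was apparently in the definition path itself. For contrast, a redefinition that ES5 does forbid (a sketch, not part of the test):

    var o = {};
    Object.defineProperty(o, "foo", { get: function() {}, configurable: false });
    // An incompatible redefinition of a non-configurable property throws:
    // Object.defineProperty(o, "foo", { value: 1 });  // TypeError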
index c439dd8..01f0dc2 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,7 @@
 // Flags: --expose-debug-as debug
 // Get the Debug object exposed from the debug context global object.
 Debug = debug.Debug
+var exception = false;
 
 function sendCommand(state, cmd) {
   // Get the debug command processor in paused state.
@@ -79,6 +80,7 @@ function listener(event, exec_state, event_data, data) {
     }
   } catch (e) {
     print(e);
+    exception = true;
   }
 }
 
@@ -91,3 +93,4 @@ function a() {
 // Set a break point and call to invoke the debug event listener.
 Debug.setBreakPoint(a, 0, 0);
 a();
+assertFalse(exception);
index ed68c97..47cdbc4 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,7 @@
 // Get the Debug object exposed from the debug context global object.
 Debug = debug.Debug
 var breaks = 0;
+var exception = false;
 
 function sendCommand(state, cmd) {
   // Get the debug command processor in paused state.
@@ -47,15 +48,18 @@ function listener(event, exec_state, event_data, data) {
                    "should not break on unexpected lines")
       assertEquals('BREAK ' + breaks, line.substr(-7));
       breaks++;
-      sendCommand(exec_state, {
-        seq: 0,
-        type: "request",
-        command: "continue",
-        arguments: { stepaction: "next" }
-      });
+      if (breaks < 4) {
+        sendCommand(exec_state, {
+          seq: 0,
+          type: "request",
+          command: "continue",
+          arguments: { stepaction: "next" }
+        });
+      }
     }
   } catch (e) {
     print(e);
+    exception = true;
   }
 }
 
@@ -82,4 +86,6 @@ function c() {
 // Set a break point and call to invoke the debug event listener.
 Debug.setBreakPoint(b, 0, 0);
 a(b);
-// BREAK 3
+a(); // BREAK 3
+
+assertFalse(exception);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2071.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2071.js
new file mode 100644 (file)
index 0000000..91ae2a7
--- /dev/null
@@ -0,0 +1,79 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+a = {};
+
+a.b = 42;
+
+with(a) {
+  a.f = (function f1() {
+    function f2() {
+      return b;
+    };
+    return f2;
+  })();
+}
+
+for(var i = 0; i < 10000; i++) {
+  assertEquals(42, a.f());
+}
+
+with(a) {
+  a.g = (function f1() {
+    function f2() {
+      function f3() {
+        return b;
+      }
+      return f3;
+    };
+    return f2();
+  })();
+}
+
+for(var i = 0; i < 10000; i++) {
+  assertEquals(42, a.g());
+}
+
+function outer() {
+  with(a) {
+    a.h = (function f1() {
+      function f2() {
+        function f3() {
+          return b;
+        }
+        return f3;
+      };
+      return f2();
+    })();
+  }
+};
+
+outer();
+
+for(var i = 0; i < 10000; i++) {
+  assertEquals(42, a.h());
+}
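
All three variants stress the same mechanism at increasing nesting depth: a function created inside a with block must resolve its free variable through the with object's scope entry, even after optimization. Reduced to the essentials (a sketch, not from the test):

    var obj = { b: 42 };
    var f;
    with (obj) {
      f = function() { return b; };  // 'b' resolves through obj
    }
    f();  // 42, although no variable 'b' exists in any enclosing function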
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2110.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2110.js
new file mode 100644 (file)
index 0000000..d7f78d2
--- /dev/null
@@ -0,0 +1,53 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var uint8 = new Uint8Array(1);
+
+function test() {
+  uint8[0] = 0x800000aa;
+  assertEquals(0xaa, uint8[0]);
+}
+
+test();
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
+
+var uint32 = new Uint32Array(1);
+
+function test2() {
+  uint32[0] = 0x80123456789abcde;
+  assertEquals(0x789ac000, uint32[0]);
+}
+
+test2();
+test2();
+%OptimizeFunctionOnNextCall(test2);
+test2();
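
Both assertions encode the standard truncation rule: stores into integer typed arrays keep the value modulo the element width, and the second case additionally drops low bits because the literal exceeds the 53-bit double mantissa. In isolation:

    var u8 = new Uint8Array(1);
    u8[0] = 0x1ff;          // kept modulo 2^8
    u8[0];                  // 0xff
    var u32 = new Uint32Array(1);
    u32[0] = 0x100000002;   // kept modulo 2^32
    u32[0];                 // 2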
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-122271.js b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-122271.js
new file mode 100644 (file)
index 0000000..3a99a7f
--- /dev/null
@@ -0,0 +1,49 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Tests that ElementsKind transitions and regular transitions don't
+// interfere badly with each other.
+
+var a = [0, 0, 0, 1];
+var b = [0, 0, 0, "one"];
+var c = [0, 0, 0, 1];
+c.foo = "baz";
+
+function foo(array) {
+  array.foo = "bar";
+}
+
+assertTrue(%HasFastSmiOnlyElements(a));
+assertTrue(%HasFastElements(b));
+
+foo(a);
+foo(b);
+
+assertTrue(%HasFastSmiOnlyElements(a));
+assertTrue(%HasFastElements(b));
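
The two transition families mentioned in the comment can be seen side by side in a few lines (the element-kind names are internal; only the observable behavior below is guaranteed):

    var a = [1, 2, 3];   // starts as an array of small integers
    a.foo = "bar";       // map transition: a named property is added
    a[0] = 1.5;          // elements transition: integers -> doubles
    // Both transitions hang off the same hidden-class tree, which is
    // where the two bookkeeping schemes can step on each other.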
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-126414.js b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-126414.js
new file mode 100644 (file)
index 0000000..6674267
--- /dev/null
@@ -0,0 +1,32 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function foo(bar)  {
+  return arguments[bar];
+}
+foo(0);           // Handled in runtime.
+foo(-536870912);  // Triggers bug.
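
The interesting input is the large negative index: any index that is not a valid array index must degrade to an ordinary (and here absent) property lookup rather than being used as a raw element offset. Reduced:

    function f() { return arguments[-536870912]; }
    f(1, 2, 3);  // undefined -- out-of-range indices are plain property keys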
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-fast-literal-transition.js b/src/3rdparty/v8/test/mjsunit/regress/regress-fast-literal-transition.js
new file mode 100644 (file)
index 0000000..72110f5
--- /dev/null
@@ -0,0 +1,62 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --always-opt --expose-gc
+
+// Test that the elements kind of the boilerplate object is sufficiently
+// checked in LFastLiteral, so that unoptimized code can transition the
+// boilerplate. The --always-opt flag makes sure that optimized code is
+// not thrown away at deoptimization.
+
+// The switch statement in f() makes sure that f() is not inlined. If we
+// start inlining switch statements, we will still catch the bug on the
+// final --stress-opt run.
+
+function f(x) {
+  switch(x) {
+    case 1: return 1.4;
+    case 2: return 1.5;
+    case 3: return {};
+    default: gc();
+  }
+}
+
+function g(x) {
+  return [1.1, 1.2, 1.3, f(x)];
+}
+
+// Step 1: Optimize g() to contain a FAST_DOUBLE_ELEMENTS boilerplate.
+assertEquals([1.1, 1.2, 1.3, 1.4], g(1));
+assertEquals([1.1, 1.2, 1.3, 1.5], g(2));
+%OptimizeFunctionOnNextCall(g);
+
+// Step 2: Deoptimize g() and transition to FAST_ELEMENTS boilerplate.
+assertEquals([1.1, 1.2, 1.3, {}], g(3));
+
+// Step 3: Cause a GC while broken clone of boilerplate is on the heap,
+// hence causing heap verification to catch it.
+assertEquals([1.1, 1.2, 1.3, undefined], g(4));
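
The relationship the comment relies on, in miniature: every evaluation of an array literal clones a boilerplate object shared by that literal site, so clones are independent, while the boilerplate itself may later be transitioned. A sketch (whether the shared boilerplate transitions on the first incompatible store is exactly the subtlety the test exercises):

    function g() { return [1.1, 2.2]; }  // one boilerplate per literal site
    var a = g();
    var b = g();                         // a and b are separate clones
    b[0] = {};                           // only b is observably changed
    a[0];                                // still 1.1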
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-transcendental.js b/src/3rdparty/v8/test/mjsunit/regress/regress-transcendental.js
new file mode 100644 (file)
index 0000000..b5dbcb4
--- /dev/null
@@ -0,0 +1,49 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc
+
+// Test whether the runtime implementation and generated code of
+// sine and tangent return the same results.
+
+function test(f, x, name) {
+  // Reset transcendental cache.
+  gc();
+  // Initializing cache leads to a runtime call.
+  var runtime_result = f(x);
+  // Flush transcendental cache entries and optimize f.
+  for (var i = 0; i < 100000; i++) f(i);
+  // Calculate using generated code.
+  var gencode_result = f(x);
+  print(name + " runtime function: " + runtime_result);
+  print(name + " generated code  : " + gencode_result);
+  assertEquals(gencode_result, runtime_result);
+}
+
+test(Math.tan, -1.57079632679489660000, "Math.tan");
+test(Math.sin, 6.283185307179586, "Math.sin");
+
index e64959a..c30be5e 100644 (file)
@@ -592,6 +592,20 @@ js1_5/Regress/regress-416737-01: FAIL_OK
 js1_5/Regress/regress-416737-02: FAIL_OK
 
 
+# Illegal escape sequences in string literals. These have already been
+# fixed by most engines (e.g. V8, JSC, Opera and FF).
+ecma/Array/15.4.5.1-1: FAIL_OK
+ecma/LexicalConventions/7.7.4: FAIL_OK
+ecma_2/RegExp/hex-001: FAIL_OK
+js1_2/regexp/hexadecimal: FAIL_OK
+
+
+# The source field of RegExp objects is properly escaped. We match JSC.
+ecma_2/RegExp/constructor-001: FAIL_OK
+ecma_2/RegExp/function-001: FAIL_OK
+ecma_2/RegExp/properties-001: FAIL_OK
+
+
 ##################### FAILING TESTS #####################
 
 # This section is for tests that fail in V8 and pass in JSC.
index a4c7d57..52d126e 100644 (file)
@@ -52,36 +52,14 @@ S15.10.2.11_A1_T3: FAIL
 
 # We are more lenient in which string character escapes we allow than
 # the spec (7.8.4 p. 19) wants us to be.  This is for compatibility.
-S7.8.4_A4.3_T2: FAIL_OK
-S7.8.4_A4.3_T2: FAIL_OK
-S7.8.4_A6.2_T2: FAIL_OK
-S7.8.4_A6.1_T4: FAIL_OK
-S7.8.4_A4.3_T4: FAIL_OK
-S7.8.4_A7.2_T2: FAIL_OK
-S7.8.4_A7.1_T4: FAIL_OK
-S7.8.4_A6.4_T2: FAIL_OK
-S7.8.4_A7.4_T2: FAIL_OK
-S7.8.4_A7.2_T4: FAIL_OK
-S7.8.4_A4.3_T6: FAIL_OK
-S7.8.4_A7.2_T6: FAIL_OK
-S7.8.4_A4.3_T1: FAIL_OK
-S7.8.4_A6.2_T1: FAIL_OK
-S7.8.4_A4.3_T3: FAIL_OK
-S7.8.4_A7.2_T1: FAIL_OK
-S7.8.4_A6.4_T1: FAIL_OK
-S7.8.4_A7.2_T3: FAIL_OK
-S7.8.4_A7.4_T1: FAIL_OK
-S7.8.4_A4.3_T5: FAIL_OK
-S7.8.4_A7.2_T5: FAIL_OK
 S7.8.4_A4.3_T1: FAIL_OK
-S7.8.4_A6.2_T1: FAIL_OK
+S7.8.4_A4.3_T2: FAIL_OK
 S7.8.4_A4.3_T3: FAIL_OK
-S7.8.4_A7.2_T1: FAIL_OK
+S7.8.4_A4.3_T4: FAIL_OK
 S7.8.4_A6.4_T1: FAIL_OK
-S7.8.4_A7.2_T3: FAIL_OK
+S7.8.4_A6.4_T2: FAIL_OK
 S7.8.4_A7.4_T1: FAIL_OK
-S7.8.4_A4.3_T5: FAIL_OK
-S7.8.4_A7.2_T5: FAIL_OK
+S7.8.4_A7.4_T2: FAIL_OK
 
 # Sputnik expects unicode escape sequences in RegExp flags to be interpreted.
 # The specification requires them to be passed uninterpreted to the RegExp
@@ -146,6 +124,16 @@ S15.3.4.2_A1_T1: FAIL_OK
 S8.5_A2.2: PASS, FAIL if $system == linux, FAIL if $system == macos
 S8.5_A2.1: PASS, FAIL if $system == linux, FAIL if $system == macos
 
+# The source field of RegExp objects is properly escaped. We match JSC.
+S15.10.4.1_A3_T1: FAIL_OK
+S15.10.4.1_A3_T2: FAIL_OK
+S15.10.4.1_A3_T3: FAIL_OK
+S15.10.4.1_A3_T4: FAIL_OK
+S15.10.4.1_A3_T5: FAIL_OK
+S15.10.4.1_A4_T2: FAIL_OK
+S15.10.4.1_A4_T3: FAIL_OK
+S15.10.4.1_A4_T5: FAIL_OK
+
 ##################### ES3 TESTS #########################
 # These tests check for ES3 semantics, and differ from ES5.
 # When we follow ES5 semantics, it's ok to fail the test.
index dae1843..59e7f5e 100644 (file)
@@ -4,11 +4,11 @@ tests from
 
   http://hg.ecmascript.org/tests/test262
 
-at revision 309 as 'data' in this directory.  Using later version
+at revision 334 as 'data' in this directory.  Using later version
 may be possible but the tests are only known to pass (and indeed run)
 with that revision.
 
-hg clone -r 309 http://hg.ecmascript.org/tests/test262 data
+hg clone -r 334 http://hg.ecmascript.org/tests/test262 data
 
 If you do update to a newer revision you may have to change the test
 harness adapter code since it uses internal functionality from the
index 3f395bd..c755289 100644 (file)
@@ -33,7 +33,18 @@ def FAIL_OK = FAIL, OKAY
 # '__proto__' should be treated as a normal property in JSON.
 S15.12.2_A1: FAIL
 
+# Sequencing of getter side effects on receiver and argument properties
+# is wrong. The receiver callback should be called before any arguments
+# are evaluated.
+# V8 Bug: http://code.google.com/p/v8/issues/detail?id=691
+11.2.3-3_3: FAIL
+
+# Prototypal inheritance of properties does not maintain accessibility.
+# The [[CanPut]] operation should traverse the prototype chain to
+# determine whether a given property is writable.
 # V8 Bug: http://code.google.com/p/v8/issues/detail?id=1475
+8.14.4-8-b_1: FAIL
+8.14.4-8-b_2: FAIL
 15.2.3.6-4-405: FAIL
 15.2.3.6-4-410: FAIL
 15.2.3.6-4-415: FAIL
@@ -52,19 +63,6 @@ S15.1.2.2_A5.1_T1: FAIL_OK
 S15.8.2.16_A7: PASS || FAIL_OK
 S15.8.2.18_A7: PASS || FAIL_OK
 
-# We are more lenient in which string character escapes we allow than
-# the spec (7.8.4 p. 19) wants us to be.  This is for compatibility.
-S7.8.4_A6.1_T4: FAIL_OK
-S7.8.4_A6.2_T1: FAIL_OK
-S7.8.4_A6.2_T2: FAIL_OK
-S7.8.4_A7.1_T4: FAIL_OK
-S7.8.4_A7.2_T1: FAIL_OK
-S7.8.4_A7.2_T2: FAIL_OK
-S7.8.4_A7.2_T3: FAIL_OK
-S7.8.4_A7.2_T4: FAIL_OK
-S7.8.4_A7.2_T5: FAIL_OK
-S7.8.4_A7.2_T6: FAIL_OK
-
 # Linux for ia32 (and therefore simulators) default to extended 80 bit floating
 # point formats, so these tests checking 64-bit FP precision fail. The other
 # platforms/arch's pass these tests.
index e4a3bde..07f760c 100644 (file)
@@ -31,11 +31,12 @@ import os
 from os.path import join, exists
 import urllib
 import hashlib
+import sys
 import tarfile
 
 
-TEST_262_ARCHIVE_REVISION = '3a890174343c'  # This is the r309 revision.
-TEST_262_ARCHIVE_MD5 = 'be5d4cfbe69cef70430907b8f3a92b50'
+TEST_262_ARCHIVE_REVISION = 'fb327c439e20'  # This is the r334 revision.
+TEST_262_ARCHIVE_MD5 = '307acd166ec34629592f240dc12d57ed'
 TEST_262_URL = 'http://hg.ecmascript.org/tests/test262/archive/%s.tar.bz2'
 TEST_262_HARNESS = ['sta.js']
 
@@ -104,9 +105,12 @@ class Test262TestConfiguration(test.TestConfiguration):
     archive_url = TEST_262_URL % revision
     archive_name = join(self.root, 'test262-%s.tar.bz2' % revision)
     directory_name = join(self.root, 'data')
+    directory_old_name = join(self.root, 'data.old')
     if not exists(archive_name):
       print "Downloading test data from %s ..." % archive_url
       urllib.urlretrieve(archive_url, archive_name)
+      if exists(directory_name):
+        os.rename(directory_name, directory_old_name)
     if not exists(directory_name):
       print "Extracting test262-%s.tar.bz2 ..." % revision
       md5 = hashlib.md5()
@@ -114,9 +118,14 @@ class Test262TestConfiguration(test.TestConfiguration):
         for chunk in iter(lambda: f.read(8192), ''):
           md5.update(chunk)
       if md5.hexdigest() != TEST_262_ARCHIVE_MD5:
+        os.remove(archive_name)
         raise Exception("Hash mismatch of test data file")
       archive = tarfile.open(archive_name, 'r:bz2')
-      archive.extractall(join(self.root))
+      if sys.platform in ('win32', 'cygwin'):
+        # The \\?\ prefix makes Windows accept paths longer than MAX_PATH.
+        archive.extractall(u'\\\\?\\%s' % self.root)
+      else:
+        archive.extractall(self.root)
       os.rename(join(self.root, 'test262-%s' % revision), directory_name)
 
   def GetBuildRequirements(self):
index e6da828..1103a97 100644 (file)
 expected_static_init_count=3
 
 v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
-d8="${v8_root}/d8"
+
+if [ -n "$1" ] ; then
+  d8="${v8_root}/$1"
+else
+  d8="${v8_root}/d8"
+fi
 
 if [ ! -f "$d8" ]; then
-  echo "Please build the project with SCons."
+  echo "d8 binary not found: $d8"
   exit 1
 fi
 
-static_inits=$(nm "$d8" | grep _GLOBAL__I | awk '{ print $NF; }')
+static_inits=$(nm "$d8" | grep _GLOBAL_ | grep _I_ | awk '{ print $NF; }')
 
 static_init_count=$(echo "$static_inits" | wc -l)
 
@@ -52,4 +57,7 @@ if [ $static_init_count -gt $expected_static_init_count ]; then
   echo "Too many static initializers."
   echo "$static_inits"
   exit 1
+else
+  echo "Static initializer check passed ($static_init_count initializers)."
+  exit 0
 fi
index d1b8b01..2b806ca 100644 (file)
@@ -184,7 +184,8 @@ the uploaded CL."
 apply_patch() {
   patch $REVERSE_PATCH -p1 < "$1" > "$PATCH_OUTPUT_FILE" || \
     { cat "$PATCH_OUTPUT_FILE" && die "Applying the patch failed."; }
-  tee < "$PATCH_OUTPUT_FILE" >(awk '{print $NF}' >> "$TOUCHED_FILES_FILE")
+  tee < "$PATCH_OUTPUT_FILE" >(grep "patching file" \
+                               | awk '{print $NF}' >> "$TOUCHED_FILES_FILE")
   rm "$PATCH_OUTPUT_FILE"
 }
 
index 9977289..29d4755 100755 (executable)
@@ -27,6 +27,7 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+import cmd
 import ctypes
 import mmap
 import optparse
@@ -36,6 +37,7 @@ import sys
 import types
 import codecs
 import re
+import struct
 
 
 USAGE="""usage: %prog [OPTION]...
@@ -106,6 +108,24 @@ class Descriptor(object):
     return Raw
 
 
+def do_dump(reader, heap):
+  """Dump all available memory regions."""
+  def dump_region(reader, start, size, location):
+    print "%s - %s" % (reader.FormatIntPtr(start),
+                       reader.FormatIntPtr(start + size))
+    for slot in xrange(start,
+                       start + size,
+                       reader.PointerSize()):
+      maybe_address = reader.ReadUIntPtr(slot)
+      heap_object = heap.FindObject(maybe_address)
+      print "%s: %s" % (reader.FormatIntPtr(slot),
+                        reader.FormatIntPtr(maybe_address))
+      if heap_object:
+        heap_object.Print(Printer())
+        print
+
+  reader.ForEachMemoryRegion(dump_region)
+
 # Set of structures and constants that describe the layout of minidump
 # files. Based on MSDN and Google Breakpad.
 
@@ -444,6 +464,34 @@ class MinidumpReader(object):
     location = self.FindLocation(address)
     return self.minidump[location:location + size]
 
+  def _ReadWord(self, location):
+    if self.arch == MD_CPU_ARCHITECTURE_AMD64:
+      return ctypes.c_uint64.from_buffer(self.minidump, location).value
+    elif self.arch == MD_CPU_ARCHITECTURE_X86:
+      return ctypes.c_uint32.from_buffer(self.minidump, location).value
+
+  def ForEachMemoryRegion(self, cb):
+    if self.memory_list64 is not None:
+      offset = 0
+      for r in self.memory_list64.ranges:
+        location = self.memory_list64.base_rva + offset
+        cb(self, r.start, r.size, location)
+        offset += r.size
+
+    if self.memory_list is not None:
+      for r in self.memory_list.ranges:
+        cb(self, r.start, r.memory.data_size, r.memory.rva)
+
+  def FindWord(self, word):
+    def search_inside_region(reader, start, size, location):
+      for loc in xrange(location, location + size):
+        if reader._ReadWord(loc) == word:
+          slot = start + (loc - location)
+          print "%s: %s" % (reader.FormatIntPtr(slot),
+                            reader.FormatIntPtr(word))
+
+    self.ForEachMemoryRegion(search_inside_region)
+
   def FindLocation(self, address):
     offset = 0
     if self.memory_list64 is not None:
@@ -745,7 +792,10 @@ class ConsString(String):
     self.right = self.ObjectField(self.RightOffset())
 
   def GetChars(self):
-    return self.left.GetChars() + self.right.GetChars()
+    try:
+      return self.left.GetChars() + self.right.GetChars()
+    except:
+      return "***CAUGHT EXCEPTION IN GROKDUMP***"
 
 
 class Oddball(HeapObject):
@@ -1011,6 +1061,42 @@ CONTEXT_FOR_ARCH = {
       ['eax', 'ebx', 'ecx', 'edx', 'edi', 'esi', 'ebp', 'esp', 'eip']
 }
 
+class InspectionShell(cmd.Cmd):
+  def __init__(self, reader, heap):
+    cmd.Cmd.__init__(self)
+    self.reader = reader
+    self.heap = heap
+    self.prompt = "(grok) "
+
+  def do_dd(self, address):
+    "Interpret memory at the given address (if available)"\
+    " as a sequence of words."
+    start = int(address, 16)
+    for slot in xrange(start,
+                       start + self.reader.PointerSize() * 10,
+                       self.reader.PointerSize()):
+      maybe_address = self.reader.ReadUIntPtr(slot)
+      heap_object = self.heap.FindObject(maybe_address)
+      print "%s: %s" % (self.reader.FormatIntPtr(slot),
+                        self.reader.FormatIntPtr(maybe_address))
+      if heap_object:
+        heap_object.Print(Printer())
+        print
+
+  def do_s(self, word):
+    "Search for a given word in available memory regions"
+    word = int(word, 0)
+    print "searching for word", word
+    self.reader.FindWord(word)
+
+  def do_list(self, smth):
+    """List all available memory regions."""
+    def print_region(reader, start, size, location):
+      print "%s - %s" % (reader.FormatIntPtr(start),
+                         reader.FormatIntPtr(start + size))
+
+    self.reader.ForEachMemoryRegion(print_region)
+
 def AnalyzeMinidump(options, minidump_name):
   reader = MinidumpReader(options, minidump_name)
   DebugPrint("========================================")
@@ -1045,21 +1131,29 @@ def AnalyzeMinidump(options, minidump_name):
     print FormatDisasmLine(start, heap, line)
   print
 
-  print "Annotated stack (from exception.esp to bottom):"
-  for slot in xrange(stack_top, stack_bottom, reader.PointerSize()):
-    maybe_address = reader.ReadUIntPtr(slot)
-    heap_object = heap.FindObject(maybe_address)
-    print "%s: %s" % (reader.FormatIntPtr(slot),
-                      reader.FormatIntPtr(maybe_address))
-    if heap_object:
-      heap_object.Print(Printer())
-      print
+  if options.full:
+    do_dump(reader, heap)
+
+  if options.shell:
+    InspectionShell(reader, heap).cmdloop("type help to get help")
+  else:
+    print "Annotated stack (from exception.esp to bottom):"
+    for slot in xrange(stack_top, stack_bottom, reader.PointerSize()):
+      maybe_address = reader.ReadUIntPtr(slot)
+      heap_object = heap.FindObject(maybe_address)
+      print "%s: %s" % (reader.FormatIntPtr(slot),
+                        reader.FormatIntPtr(maybe_address))
+      if heap_object:
+        heap_object.Print(Printer())
+        print
 
   reader.Dispose()
 
 
 if __name__ == "__main__":
   parser = optparse.OptionParser(USAGE)
+  parser.add_option("-s", "--shell", dest="shell", action="store_true")
+  parser.add_option("-f", "--full", dest="full", action="store_true")
   options, args = parser.parse_args()
   if len(args) != 1:
     parser.print_help()
index 46f85fe..aa91139 100644 (file)
                 # has some sources to link into the component.
                 '../../src/v8dll-main.cc',
               ],
+              'defines': [
+                'V8_SHARED',
+                'BUILDING_V8_SHARED',
+              ],
+              'direct_dependent_settings': {
+                'defines': [
+                  'V8_SHARED',
+                  'USING_V8_SHARED',
+                ],
+              },
               'conditions': [
                 ['OS=="mac"', {
                   'xcode_settings': {
                     'OTHER_LDFLAGS': ['-dynamiclib', '-all_load']
                   },
                 }],
-                ['OS=="win"', {
-                  'defines': [
-                    'BUILDING_V8_SHARED',
-                  ],
-                  'direct_dependent_settings': {
-                    'defines': [
-                      'USING_V8_SHARED',
-                    ],
-                  },
-                }, {
-                  'defines': [
-                    'V8_SHARED',
-                  ],
-                  'direct_dependent_settings': {
-                    'defines': [
-                      'V8_SHARED',
-                    ],
-                  },
-                }],
                 ['soname_version!=""', {
                   'product_extension': 'so.<(soname_version)',
                 }],
               'dependencies': ['mksnapshot', 'js2c'],
             }],
             ['component=="shared_library"', {
-              'conditions': [
-                ['OS=="win"', {
-                  'defines': [
-                    'BUILDING_V8_SHARED',
-                  ],
-                  'direct_dependent_settings': {
-                    'defines': [
-                      'USING_V8_SHARED',
-                    ],
-                  },
-                }, {
-                  'defines': [
-                    'V8_SHARED',
-                  ],
-                  'direct_dependent_settings': {
-                    'defines': [
-                      'V8_SHARED',
-                    ],
-                  },
-                }],
+              'defines': [
+                'V8_SHARED',
+                'BUILDING_V8_SHARED',
               ],
+              'direct_dependent_settings': {
+                'defines': [
+                  'V8_SHARED',
+                  'USING_V8_SHARED',
+                ],
+              },
             }],
           ],
           'dependencies': [
index b41948f..d06cbe4 100644 (file)
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 #
-# Copyright 2006-2008 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -195,7 +195,7 @@ def ReadMacros(lines):
       macro_match = MACRO_PATTERN.match(line)
       if macro_match:
         name = macro_match.group(1)
-        args = args = [match.strip() for match in macro_match.group(2).split(',')]
+        args = [match.strip() for match in macro_match.group(2).split(',')]
         body = macro_match.group(3).strip()
         macros.append((re.compile("\\b%s\\(" % name), TextMacro(args, body)))
       else:
index 49e0034..250dea9 100644 (file)
@@ -1,6 +1,6 @@
 #!/usr/bin/python2.4
 
-# Copyright 2009 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
index a5f4c61..a0b81e8 100755 (executable)
@@ -114,12 +114,15 @@ def CppLintWorker(command):
     while True:
       out_line = process.stderr.readline()
       if out_line == '' and process.poll() != None:
+        if error_count == -1:
+          print "Failed to process %s" % command.pop()
+          return 1
         break
       m = LINT_OUTPUT_PATTERN.match(out_line)
       if m:
         out_lines += out_line
         error_count += 1
-    sys.stderr.write(out_lines)
+    sys.stdout.write(out_lines)
     return error_count
   except KeyboardInterrupt:
     process.kill()
@@ -300,7 +303,8 @@ class SourceProcessor(SourceFileProcessor):
               or (name == 'third_party')
               or (name == 'gyp')
               or (name == 'out')
-              or (name == 'obj'))
+              or (name == 'obj')
+              or (name == 'DerivedSources'))
 
   IGNORE_COPYRIGHTS = ['cpplint.py',
                        'earley-boyer.js',
index 3fb5b34..ff6dd1d 100755 (executable)
@@ -130,6 +130,7 @@ if [ $START_STEP -le $CURRENT_STEP ] ; then
         | grep "^BUG=" | grep -v "BUG=$" | grep -v "BUG=none$" \
         | sed -e 's/^/        /' \
         | sed -e 's/BUG=v8:\(.*\)$/(issue \1)/' \
+        | sed -e 's/BUG=chromium:\(.*\)$/(Chromium issue \1)/' \
         | sed -e 's/BUG=\(.*\)$/(Chromium issue \1)/' \
         >> "$CHANGELOG_ENTRY_FILE"
     # Append the commit's author for reference.
@@ -320,6 +321,14 @@ if [ $START_STEP -le $CURRENT_STEP ] ; then
     || die "'git svn tag' failed."
 fi
 
+if [ -z "$CHROME_PATH" ] ; then
+  echo ">>> (asking for Chromium checkout)"
+  echo -n "Do you have a \"NewGit\" Chromium checkout and want this script \
+to automate creation of the roll CL? If yes, enter the path to (and including) \
+the \"src\" directory here, otherwise just press <Return>: "
+  read CHROME_PATH
+fi
+
 if [ -n "$CHROME_PATH" ] ; then
 
   let CURRENT_STEP+=1
index fda4105..eda2459 100755 (executable)
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 #
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -56,6 +56,9 @@ def BuildOptions():
   result.add_option("--no-presubmit",
                     help='Skip presubmit checks',
                     default=False, action="store_true")
+  result.add_option("--buildbot",
+                    help='Adapt to path structure used on buildbots',
+                    default=False, action="store_true")
 
   # Flags this wrapper script handles itself:
   result.add_option("-m", "--mode",
@@ -144,14 +147,16 @@ def ProcessOptions(options):
     options.mode = options.mode.split(',')
     options.arch = options.arch.split(',')
   for mode in options.mode:
-    if not mode in ['debug', 'release']:
+    if mode.lower() not in ['debug', 'release']:
       print "Unknown mode %s" % mode
       return False
   for arch in options.arch:
     if not arch in ['ia32', 'x64', 'arm', 'mips']:
       print "Unknown architecture %s" % arch
       return False
-
+  if options.buildbot:
+    # Buildbots run presubmit tests as a separate step.
+    options.no_presubmit = True
   return True
 
 
@@ -213,22 +218,26 @@ def Main():
     return 1
 
   workspace = abspath(join(dirname(sys.argv[0]), '..'))
+  returncodes = 0
 
   if not options.no_presubmit:
     print ">>> running presubmit tests"
-    subprocess.call([workspace + '/tools/presubmit.py'])
+    returncodes += subprocess.call([workspace + '/tools/presubmit.py'])
 
   args_for_children = [workspace + '/tools/test.py'] + PassOnOptions(options)
   args_for_children += ['--no-build', '--build-system=gyp']
   for arg in args:
     args_for_children += [arg]
-  returncodes = 0
   env = os.environ
 
   for mode in options.mode:
     for arch in options.arch:
       print ">>> running tests for %s.%s" % (arch, mode)
-      shellpath = workspace + '/' + options.outdir + '/' + arch + '.' + mode
+      if options.buildbot:
+        shellpath = workspace + '/' + options.outdir + '/' + mode
+        mode = mode.lower()
+      else:
+        shellpath = workspace + '/' + options.outdir + '/' + arch + '.' + mode
       env['LD_LIBRARY_PATH'] = shellpath + '/lib.target'
       shell = shellpath + "/d8"
       child = subprocess.Popen(' '.join(args_for_children +