From b15a10e7a014674ef6f71c51ad84032fb7b802e2 Mon Sep 17 00:00:00 2001
From: Ben Noordhuis
Date: Mon, 25 Feb 2013 22:45:23 +0100
Subject: [PATCH] deps: downgrade v8 to 3.14.5

V8 3.15 and newer have stability and performance issues. Roll back to a
known-good version.
---
 deps/v8/.gitignore | 5 -
 deps/v8/AUTHORS | 3 -
 deps/v8/ChangeLog | 158 --
 deps/v8/build/android.gypi | 9 +-
 deps/v8/build/common.gypi | 52 +-
 deps/v8/include/v8-profiler.h | 17 +-
 deps/v8/include/v8.h | 630 +++-----
 deps/v8/samples/shell.cc | 27 +-
 deps/v8/src/accessors.cc | 104 +-
 deps/v8/src/api.cc | 380 ++---
 deps/v8/src/api.h | 6 +-
 deps/v8/src/arm/assembler-arm-inl.h | 31 +-
 deps/v8/src/arm/assembler-arm.cc | 71 +-
 deps/v8/src/arm/assembler-arm.h | 54 +-
 deps/v8/src/arm/builtins-arm.cc | 33 -
 deps/v8/src/arm/code-stubs-arm.cc | 894 ++++++------
 deps/v8/src/arm/code-stubs-arm.h | 123 +-
 deps/v8/src/arm/codegen-arm.cc | 256 +---
 deps/v8/src/arm/codegen-arm.h | 16 -
 deps/v8/src/arm/constants-arm.h | 15 +-
 deps/v8/src/arm/deoptimizer-arm.cc | 32 +-
 deps/v8/src/arm/disasm-arm.cc | 12 +-
 deps/v8/src/arm/full-codegen-arm.cc | 212 ++-
 deps/v8/src/arm/ic-arm.cc | 38 +-
 deps/v8/src/arm/lithium-arm.cc | 257 ++--
 deps/v8/src/arm/lithium-arm.h | 213 ++-
 deps/v8/src/arm/lithium-codegen-arm.cc | 569 +++----
 deps/v8/src/arm/lithium-codegen-arm.h | 6 -
 deps/v8/src/arm/macro-assembler-arm.cc | 86 +-
 deps/v8/src/arm/macro-assembler-arm.h | 30 +-
 deps/v8/src/arm/regexp-macro-assembler-arm.cc | 4 +-
 deps/v8/src/arm/simulator-arm.cc | 112 +-
 deps/v8/src/arm/simulator-arm.h | 4 -
 deps/v8/src/arm/stub-cache-arm.cc | 132 +-
 deps/v8/src/array.js | 18 +-
 deps/v8/src/assembler.cc | 187 +--
 deps/v8/src/assembler.h | 75 +-
 deps/v8/src/ast.cc | 20 +-
 deps/v8/src/ast.h | 38 +-
 deps/v8/src/atomicops.h | 4 +-
 deps/v8/src/atomicops_internals_tsan.h | 335 -----
 deps/v8/src/bootstrapper.cc | 23 +-
 deps/v8/src/bootstrapper.h | 2 +-
 deps/v8/src/builtins.cc | 702 +++------
 deps/v8/src/builtins.h | 31 +-
 deps/v8/src/code-stubs.cc | 181 +--
 deps/v8/src/code-stubs.h | 258 ++--
 deps/v8/src/codegen.cc | 1 -
 deps/v8/src/codegen.h | 14 -
 deps/v8/src/collection.js | 46 +-
 deps/v8/src/compilation-cache.cc | 2 +-
 deps/v8/src/compiler.cc | 119 +-
 deps/v8/src/compiler.h | 29 +-
 deps/v8/src/contexts.cc | 25 +-
 deps/v8/src/contexts.h | 24 +-
 deps/v8/src/counters.cc | 7 +-
 deps/v8/src/d8.cc | 407 +++---
 deps/v8/src/d8.h | 28 +-
 deps/v8/src/date.js | 2 +-
 deps/v8/src/debug-debugger.js | 121 +-
 deps/v8/src/debug.cc | 16 +-
 deps/v8/src/deoptimizer.cc | 114 +-
 deps/v8/src/deoptimizer.h | 29 +-
 deps/v8/src/elements-kind.cc | 9 +-
 deps/v8/src/elements-kind.h | 8 -
 deps/v8/src/elements.cc | 546 +++----
 deps/v8/src/elements.h | 43 +-
 deps/v8/src/execution.cc | 34 +-
 deps/v8/src/execution.h | 7 +-
 .../src/extensions/externalize-string-extension.cc | 5 +-
 deps/v8/src/extensions/gc-extension.cc | 6 +-
 deps/v8/src/factory.cc | 31 +-
 deps/v8/src/factory.h | 9 +-
 deps/v8/src/flag-definitions.h | 29 +-
 deps/v8/src/frames.cc | 4 +-
 deps/v8/src/full-codegen.cc | 240 +---
 deps/v8/src/full-codegen.h | 19 +-
 deps/v8/src/global-handles.cc | 95 +-
 deps/v8/src/global-handles.h | 20 +-
 deps/v8/src/handles.cc | 29 +-
 deps/v8/src/handles.h | 11 +-
 deps/v8/src/heap-inl.h | 14 +-
 deps/v8/src/heap-profiler.cc | 43 +-
 deps/v8/src/heap-profiler.h | 32 +-
 deps/v8/src/heap.cc | 439 +++---
 deps/v8/src/heap.h | 62 +-
 deps/v8/src/hydrogen-instructions.cc | 498 +++----
 deps/v8/src/hydrogen-instructions.h | 688 +++++----
 deps/v8/src/hydrogen.cc | 948 ++++++------
 deps/v8/src/hydrogen.h | 72 +-
 deps/v8/src/ia32/assembler-ia32-inl.h | 32 +-
 deps/v8/src/ia32/assembler-ia32.cc | 107 +-
 deps/v8/src/ia32/assembler-ia32.h | 35 +-
 deps/v8/src/ia32/builtins-ia32.cc | 36 -
 deps/v8/src/ia32/code-stubs-ia32.cc | 812 ++++++-----
 deps/v8/src/ia32/code-stubs-ia32.h | 90 ++
 deps/v8/src/ia32/codegen-ia32.cc | 203 +--
 deps/v8/src/ia32/codegen-ia32.h | 14 -
 deps/v8/src/ia32/deoptimizer-ia32.cc | 30 +-
 deps/v8/src/ia32/disasm-ia32.cc | 9 -
 deps/v8/src/ia32/full-codegen-ia32.cc | 198 ++-
 deps/v8/src/ia32/ic-ia32.cc | 36 +-
 deps/v8/src/ia32/lithium-codegen-ia32.cc | 359 ++---
 deps/v8/src/ia32/lithium-codegen-ia32.h | 6 -
 deps/v8/src/ia32/lithium-ia32.cc | 220 ++-
 deps/v8/src/ia32/lithium-ia32.h | 174 ++-
 deps/v8/src/ia32/macro-assembler-ia32.cc | 57 +-
 deps/v8/src/ia32/macro-assembler-ia32.h | 4 +-
 deps/v8/src/ia32/regexp-macro-assembler-ia32.cc | 4 +-
 deps/v8/src/ia32/stub-cache-ia32.cc | 100 +-
 deps/v8/src/ic-inl.h | 3 +-
 deps/v8/src/ic.cc | 339 ++---
 deps/v8/src/ic.h | 35 +-
 deps/v8/src/incremental-marking-inl.h | 27 +-
 deps/v8/src/incremental-marking.cc | 261 ++--
 deps/v8/src/incremental-marking.h | 21 +-
 deps/v8/src/interface.cc | 13 +-
 deps/v8/src/interface.h | 35 +-
 deps/v8/src/isolate.cc | 178 +--
 deps/v8/src/isolate.h | 21 +-
 deps/v8/src/json-parser.h | 152 +-
 deps/v8/src/json-stringifier.h | 748 ----------
 deps/v8/src/json.js | 143 +-
 deps/v8/src/jsregexp.cc | 6 +-
 deps/v8/src/lithium.h | 4 +-
 deps/v8/src/liveedit-debugger.js | 37 +-
 deps/v8/src/liveedit.cc | 196 +--
 deps/v8/src/liveobjectlist.cc | 2 +-
 deps/v8/src/log-utils.cc | 9 +-
 deps/v8/src/log.cc | 198 ++-
 deps/v8/src/log.h | 44 +-
 deps/v8/src/macros.py | 2 -
 deps/v8/src/mark-compact.cc | 237 ++-
 deps/v8/src/mark-compact.h | 43 +-
 deps/v8/src/math.js | 30 +-
 deps/v8/src/messages.cc | 4 +-
 deps/v8/src/messages.js | 420 +++---
 deps/v8/src/mips/assembler-mips-inl.h | 23 -
 deps/v8/src/mips/assembler-mips.cc | 54 +-
 deps/v8/src/mips/assembler-mips.h | 25 +-
 deps/v8/src/mips/builtins-mips.cc | 42 -
 deps/v8/src/mips/code-stubs-mips.cc | 909 ++++++------
 deps/v8/src/mips/code-stubs-mips.h | 127 +-
 deps/v8/src/mips/codegen-mips.cc | 266 +---
 deps/v8/src/mips/codegen-mips.h | 16 -
 deps/v8/src/mips/deoptimizer-mips.cc | 8 +-
 deps/v8/src/mips/full-codegen-mips.cc | 204 ++-
 deps/v8/src/mips/ic-mips.cc | 39 +-
 deps/v8/src/mips/lithium-codegen-mips.cc | 576 ++++----
 deps/v8/src/mips/lithium-codegen-mips.h | 6 -
 deps/v8/src/mips/lithium-mips.cc | 222 ++-
 deps/v8/src/mips/lithium-mips.h | 176 ++-
 deps/v8/src/mips/macro-assembler-mips.cc | 92 +-
 deps/v8/src/mips/macro-assembler-mips.h | 26 +-
 deps/v8/src/mips/regexp-macro-assembler-mips.cc | 4 +-
 deps/v8/src/mips/simulator-mips.cc | 110 +-
 deps/v8/src/mips/simulator-mips.h | 5 -
 deps/v8/src/mips/stub-cache-mips.cc | 194 +--
 deps/v8/src/mirror-debugger.js | 25 -
 deps/v8/src/object-observe.js | 242 ----
 deps/v8/src/objects-debug.cc | 9 +-
 deps/v8/src/objects-inl.h | 517 ++-----
 deps/v8/src/objects-printer.cc | 11 +-
 deps/v8/src/objects-visiting-inl.h | 69 +-
 deps/v8/src/objects-visiting.cc | 4 +-
 deps/v8/src/objects-visiting.h | 17 +-
 deps/v8/src/objects.cc | 1520 +++++++-------------
 deps/v8/src/objects.h | 357 ++---
 deps/v8/src/optimizing-compiler-thread.cc | 25 +-
 deps/v8/src/optimizing-compiler-thread.h | 20 +-
 deps/v8/src/parser.cc | 128 +-
 deps/v8/src/parser.h | 1 +
 deps/v8/src/platform-cygwin.cc | 28 +-
 deps/v8/src/platform-freebsd.cc | 51 +-
 deps/v8/src/platform-linux.cc | 57 +-
 deps/v8/src/platform-macos.cc | 27 +-
 deps/v8/src/platform-nullos.cc | 6 -
 deps/v8/src/platform-openbsd.cc | 58 +-
 deps/v8/src/platform-posix.cc | 26 +-
 deps/v8/src/platform-solaris.cc | 67 +-
 deps/v8/src/platform-win32.cc | 37 +-
 deps/v8/src/platform.h | 11 +-
 deps/v8/src/preparser.h | 4 +-
 deps/v8/src/prettyprinter.cc | 15 -
 deps/v8/src/profile-generator-inl.h | 2 +-
 deps/v8/src/profile-generator.cc | 102 +-
 deps/v8/src/profile-generator.h | 7 +-
 deps/v8/src/property-details.h | 4 -
 deps/v8/src/property.cc | 2 +-
 deps/v8/src/property.h | 47 +-
 deps/v8/src/proxy.js | 9 +-
 deps/v8/src/regexp-macro-assembler.cc | 8 +-
 deps/v8/src/regexp-stack.cc | 1 -
 deps/v8/src/regexp.js | 8 +-
 deps/v8/src/rewriter.cc | 9 +-
 deps/v8/src/runtime-profiler.cc | 20 +-
 deps/v8/src/runtime-profiler.h | 2 +
 deps/v8/src/runtime.cc | 1381 ++++++++----------
 deps/v8/src/runtime.h | 36 +-
 deps/v8/src/scopeinfo.cc | 29 -
 deps/v8/src/scopeinfo.h | 67 +-
 deps/v8/src/scopes.cc | 170 ++-
 deps/v8/src/scopes.h | 31 +-
 deps/v8/src/serialize.cc | 6 +-
 deps/v8/src/spaces-inl.h | 13 -
 deps/v8/src/spaces.cc | 63 +-
 deps/v8/src/spaces.h | 71 +-
 deps/v8/src/store-buffer.h | 4 +-
 deps/v8/src/string.js | 59 +-
 deps/v8/src/stub-cache.cc | 84 +-
 deps/v8/src/stub-cache.h | 16 +-
 deps/v8/src/token.h | 1 -
 deps/v8/src/type-info.cc | 165 ++-
 deps/v8/src/type-info.h | 12 +-
 deps/v8/src/uri.js | 86 +-
 deps/v8/src/v8-counters.cc | 11 +
 deps/v8/src/v8-counters.h | 17 +
 deps/v8/src/v8.cc | 16 +-
 deps/v8/src/v8conversions.cc | 4 +-
 deps/v8/src/v8globals.h | 43 +-
 deps/v8/src/v8natives.js | 88 +-
 deps/v8/src/v8utils.h | 2 -
 deps/v8/src/variables.cc | 6 +-
 deps/v8/src/variables.h | 4 +-
 deps/v8/src/version.cc | 6 +-
 deps/v8/src/vm-state-inl.h | 12 +-
 deps/v8/src/x64/assembler-x64-inl.h | 28 -
 deps/v8/src/x64/assembler-x64.cc | 101 +-
 deps/v8/src/x64/assembler-x64.h | 40 +-
 deps/v8/src/x64/builtins-x64.cc | 40 -
 deps/v8/src/x64/code-stubs-x64.cc | 654 +++++----
 deps/v8/src/x64/code-stubs-x64.h | 96 ++
 deps/v8/src/x64/codegen-x64.cc | 189 +--
 deps/v8/src/x64/codegen-x64.h | 15 +-
 deps/v8/src/x64/deoptimizer-x64.cc | 30 +-
 deps/v8/src/x64/disasm-x64.cc | 12 -
 deps/v8/src/x64/full-codegen-x64.cc | 196 ++-
 deps/v8/src/x64/ic-x64.cc | 35 +-
 deps/v8/src/x64/lithium-codegen-x64.cc | 521 +++----
 deps/v8/src/x64/lithium-codegen-x64.h | 6 -
 deps/v8/src/x64/lithium-x64.cc | 215 +--
 deps/v8/src/x64/lithium-x64.h | 160 ++-
 deps/v8/src/x64/macro-assembler-x64.cc | 57 +-
 deps/v8/src/x64/macro-assembler-x64.h | 3 +-
 deps/v8/src/x64/regexp-macro-assembler-x64.cc | 4 +-
 deps/v8/src/x64/stub-cache-x64.cc | 101 +-
 deps/v8/test/cctest/cctest.gyp | 1 -
 deps/v8/test/cctest/cctest.h | 20 -
 deps/v8/test/cctest/test-accessors.cc | 26 -
 deps/v8/test/cctest/test-alloc.cc | 26 +-
 deps/v8/test/cctest/test-api.cc | 421 +----
 deps/v8/test/cctest/test-assembler-arm.cc | 4 +-
 deps/v8/test/cctest/test-compiler.cc | 3 +-
 deps/v8/test/cctest/test-debug.cc | 8 +-
 deps/v8/test/cctest/test-decls.cc | 170 ++-
 deps/v8/test/cctest/test-dictionary.cc | 6 +-
 deps/v8/test/cctest/test-disasm-arm.cc | 6 +-
 deps/v8/test/cctest/test-heap-profiler.cc | 28 +-
 deps/v8/test/cctest/test-heap.cc | 493 +------
 deps/v8/test/cctest/test-lockers.cc | 7 +-
 deps/v8/test/cctest/test-log.cc | 8 +-
 deps/v8/test/cctest/test-mark-compact.cc | 1 -
 deps/v8/test/cctest/test-object-observe.cc | 280 ----
 deps/v8/test/cctest/test-parsing.cc | 86 +-
 deps/v8/test/cctest/test-regexp.cc | 20 +-
 deps/v8/test/mjsunit/array-bounds-check-removal.js | 24 -
 deps/v8/test/mjsunit/array-natives-elements.js | 307 ----
 deps/v8/test/mjsunit/array-reduce.js | 16 +-
 deps/v8/test/mjsunit/array-slice.js | 12 -
 deps/v8/test/mjsunit/array-store-and-grow.js | 5 +-
 deps/v8/test/mjsunit/compiler/multiply-add.js | 69 -
 deps/v8/test/mjsunit/compiler/proto-chain-load.js | 44 -
 deps/v8/test/mjsunit/compiler/rotate.js | 224 ---
 .../test/mjsunit/debug-liveedit-compile-error.js | 60 -
 deps/v8/test/mjsunit/debug-liveedit-literals.js | 94 --
 deps/v8/test/mjsunit/debug-set-variable-value.js | 176 ---
 deps/v8/test/mjsunit/elements-kind.js | 3 +-
 deps/v8/test/mjsunit/elements-length-no-holey.js | 33 -
 deps/v8/test/mjsunit/error-accessors.js | 54 -
 deps/v8/test/mjsunit/error-constructors.js | 15 +-
 deps/v8/test/mjsunit/function-call.js | 32 +-
 deps/v8/test/mjsunit/fuzz-natives-part1.js | 9 +-
 deps/v8/test/mjsunit/fuzz-natives-part2.js | 9 +-
 deps/v8/test/mjsunit/fuzz-natives-part3.js | 9 +-
 deps/v8/test/mjsunit/fuzz-natives-part4.js | 9 +-
 deps/v8/test/mjsunit/harmony/collections.js | 58 +-
 deps/v8/test/mjsunit/harmony/module-linking.js | 2 +-
 deps/v8/test/mjsunit/harmony/object-observe.js | 873 ----------
 deps/v8/test/mjsunit/harmony/proxies-json.js | 178 ---
 deps/v8/test/mjsunit/harmony/proxies.js | 5 -
 deps/v8/test/mjsunit/json-parser-recursive.js | 33 -
 deps/v8/test/mjsunit/json-stringify-recursive.js | 52 -
 deps/v8/test/mjsunit/json.js | 36 -
 deps/v8/test/mjsunit/json2.js | 153 --
 deps/v8/test/mjsunit/manual-parallel-recompile.js | 79 -
 deps/v8/test/mjsunit/math-exp-precision.js | 64 -
 .../test/mjsunit/math-floor-of-div-minus-zero.js | 1 -
 deps/v8/test/mjsunit/mjsunit.status | 10 -
 deps/v8/test/mjsunit/regress/regress-121407.js | 2 +-
 deps/v8/test/mjsunit/regress/regress-164442.js | 45 -
 deps/v8/test/mjsunit/regress/regress-166553.js | 33 -
 deps/v8/test/mjsunit/regress/regress-1692.js | 2 +-
 deps/v8/test/mjsunit/regress/regress-1980.js | 2 +-
 deps/v8/test/mjsunit/regress/regress-2263.js | 30 -
 deps/v8/test/mjsunit/regress/regress-2315.js | 40 -
 deps/v8/test/mjsunit/regress/regress-2398.js | 41 -
 deps/v8/test/mjsunit/regress/regress-2410.js | 36 -
 deps/v8/test/mjsunit/regress/regress-2416.js | 75 -
 deps/v8/test/mjsunit/regress/regress-2433.js | 36 -
 deps/v8/test/mjsunit/regress/regress-2437.js | 156 --
 deps/v8/test/mjsunit/regress/regress-2438.js | 52 -
 deps/v8/test/mjsunit/regress/regress-2443.js | 129 --
 deps/v8/test/mjsunit/regress/regress-2444.js | 120 --
 deps/v8/test/mjsunit/regress/regress-2489.js | 50 -
 deps/v8/test/mjsunit/regress/regress-2499.js | 40 -
 deps/v8/test/mjsunit/regress/regress-492.js | 40 +-
 .../test/mjsunit/regress/regress-crbug-135066.js | 14 +-
 .../test/mjsunit/regress/regress-crbug-157019.js | 54 -
 .../test/mjsunit/regress/regress-crbug-157520.js | 38 -
 .../test/mjsunit/regress/regress-crbug-158185.js | 39 -
 .../test/mjsunit/regress/regress-crbug-160010.js | 33 -
 .../test/mjsunit/regress/regress-crbug-162085.js | 71 -
 .../test/mjsunit/regress/regress-crbug-170856.js | 33 -
 .../v8/test/mjsunit/regress/regress-crbug-18639.js | 14 +-
 .../mjsunit/regress/regress-delete-empty-double.js | 40 -
 .../mjsunit/regress/regress-json-stringify-gc.js | 41 -
 .../regress/regress-observe-empty-double-array.js | 37 -
 deps/v8/test/mjsunit/shift-for-integer-div.js | 59 -
 deps/v8/test/mjsunit/stack-traces-overflow.js | 122 --
 deps/v8/test/mjsunit/strict-mode.js | 47 +-
 deps/v8/test/mjsunit/string-natives.js | 72 -
 deps/v8/test/mjsunit/string-split.js | 17 -
 deps/v8/test/mjsunit/testcfg.py | 3 +-
 deps/v8/test/mjsunit/tools/tickprocessor-test.log | 38 +-
 deps/v8/test/mjsunit/uri.js | 12 -
 deps/v8/test/mozilla/mozilla.status | 10 +-
 deps/v8/test/test262/README | 4 +-
 deps/v8/test/test262/test262.status | 12 +-
 deps/v8/test/test262/testcfg.py | 11 +-
 deps/v8/tools/gen-postmortem-metadata.py | 14 +-
 deps/v8/tools/grokdump.py | 151 +-
 deps/v8/tools/gyp/v8.gyp | 10 -
 deps/v8/tools/ll_prof.py | 63 +-
 deps/v8/tools/plot-timer-events | 71 -
 deps/v8/tools/plot-timer-events.js | 576 --------
 deps/v8/tools/run-llprof.sh | 69 -
 deps/v8/tools/run-tests.py | 13 +-
 deps/v8/tools/tick-processor.html | 168 ---
 deps/v8/tools/tickprocessor.js | 16 +-
 349 files changed, 11394 insertions(+), 24676 deletions(-)
 delete mode 100644 deps/v8/src/atomicops_internals_tsan.h
 delete mode 100644 deps/v8/src/json-stringifier.h
 delete mode 100644 deps/v8/src/object-observe.js
 delete mode 100644 deps/v8/test/cctest/test-object-observe.cc
 delete mode 100644 deps/v8/test/mjsunit/array-natives-elements.js
 delete mode 100644 deps/v8/test/mjsunit/compiler/multiply-add.js
 delete mode 100644 deps/v8/test/mjsunit/compiler/proto-chain-load.js
 delete mode 100644 deps/v8/test/mjsunit/compiler/rotate.js
 delete mode 100644 deps/v8/test/mjsunit/debug-liveedit-compile-error.js
 delete mode 100644 deps/v8/test/mjsunit/debug-liveedit-literals.js
 delete mode 100644 deps/v8/test/mjsunit/debug-set-variable-value.js
 delete mode 100644 deps/v8/test/mjsunit/elements-length-no-holey.js
 delete mode 100644 deps/v8/test/mjsunit/error-accessors.js
 delete mode 100644 deps/v8/test/mjsunit/harmony/object-observe.js
 delete mode 100644 deps/v8/test/mjsunit/harmony/proxies-json.js
 delete mode 100644 deps/v8/test/mjsunit/json-parser-recursive.js
 delete mode 100644 deps/v8/test/mjsunit/json-stringify-recursive.js
 delete mode 100644 deps/v8/test/mjsunit/json2.js
 delete mode 100644 deps/v8/test/mjsunit/manual-parallel-recompile.js
 delete mode 100644 deps/v8/test/mjsunit/math-exp-precision.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-164442.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-166553.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-2263.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-2315.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-2398.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-2410.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-2416.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-2433.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-2437.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-2438.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-2443.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-2444.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-2489.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-2499.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-157019.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-157520.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-158185.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-160010.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-162085.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-crbug-170856.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-delete-empty-double.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-json-stringify-gc.js
 delete mode 100644 deps/v8/test/mjsunit/regress/regress-observe-empty-double-array.js
 delete mode 100644 deps/v8/test/mjsunit/shift-for-integer-div.js
 delete mode 100644 deps/v8/test/mjsunit/stack-traces-overflow.js
 delete mode 100644 deps/v8/test/mjsunit/string-natives.js
 delete mode 100755 deps/v8/tools/plot-timer-events
 delete mode 100644 deps/v8/tools/plot-timer-events.js
 delete mode 100755 deps/v8/tools/run-llprof.sh
 delete mode 100644 deps/v8/tools/tick-processor.html

diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index fe8425f..0bf9313 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -18,7 +18,6 @@
 #*#
 *~
 .cpplint-cache
-.d8_history
 d8
 d8_g
 shell
@@ -51,7 +50,3 @@ shell_g
 /xcodebuild
 TAGS
 *.Makefile
-GTAGS
-GRTAGS
-GSYMS
-GPATH
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index c279e7c..1156d94 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -20,7 +20,6 @@ Burcu Dogan
 Craig Schlenter
 Daniel Andersson
 Daniel James
-Derek J Conrod
 Dineel D Sule
 Erich Ocean
 Fedor Indutny
@@ -45,7 +44,6 @@ Paolo Giarrusso
 Patrick Gansterer
 Peter Varga
 Rafal Krypa
-Rajeev R Krithivasan
 Rene Rebe
 Robert Mustacchi
 Rodolph Perfetta
@@ -55,7 +53,6 @@ Sanjoy Das
 Subrato K De
 Tobias Burnus
 Vlad Burlik
-Xi Qian
 Yuqiang Xian
 Zaheer Ahmad
 Zhongping Wang
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 52601a4..7c435c8 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,161 +1,3 @@
-2012-12-10: Version 3.15.11
-
-        Define CAN_USE_VFP2/3_INSTRUCTIONS based on arm_neon and arm_fpu GYP
-        flags.
-
-        Performance and stability improvements on all platforms.
-
-
-2012-12-07: Version 3.15.10
-
-        Enabled optimisation of functions inside eval. (issue 2315)
-
-        Fixed spec violations in methods of Number.prototype. (issue 2443)
-
-        Added GCTracer metrics for a scavenger GC for DOM wrappers.
-
-        Performance and stability improvements on all platforms.
-
-
-2012-12-06: Version 3.15.9
-
-        Fixed candidate eviction in code flusher.
-        (Chromium issue 159140)
-
-        Iterate through all arguments for side effects in Math.min/max.
-        (issue 2444)
-
-        Fixed spec violations related to regexp.lastIndex
-        (issue 2437, issue 2438)
-
-        Performance and stability improvements on all platforms.
-
-
-2012-12-04: Version 3.15.8
-
-        Enforced stack allocation of TryCatch blocks.
-        (issue 2166,chromium:152389)
-
-        Fixed external exceptions in external try-catch handlers.
-        (issue 2166)
-
-        Activated incremental code flushing by default.
-
-        Performance and stability improvements on all platforms.
-
-
-2012-11-30: Version 3.15.7
-
-        Activated code aging by default.
-
-        Included more information in --prof log.
-
-        Removed eager sweeping for lazy swept spaces. Try to find in
-        SlowAllocateRaw a bounded number of times a big enough memory slot.
-        (issue 2194)
-
-        Performance and stability improvements on all platforms.
-
-
-2012-11-26: Version 3.15.6
-
-        Ensure double arrays are filled with holes when extended from
-        variations of empty arrays. (Chromium issue 162085)
-
-        Performance and stability improvements on all platforms.
-
-
-2012-11-23: Version 3.15.5
-
-        Fixed JSON.stringify for objects with interceptor handlers.
-        (Chromium issue 161028)
-
-        Fixed corner case in x64 compare stubs. (issue 2416)
-
-        Performance and stability improvements on all platforms.
-
-
-2012-11-16: Version 3.15.4
-
-        Fixed Array.prototype.join evaluation order. (issue 2263)
-
-        Perform CPU sampling by CPU sampling thread only iff processing thread
-        is not running. (issue 2364)
-
-        When using an Object as a set in Object.getOwnPropertyNames, null out
-        the proto. (issue 2410)
-
-        Disabled EXTRA_CHECKS in Release build.
-
-        Heap explorer: Show representation of strings.
-
-        Removed 'type' and 'arguments' properties from Error object.
-        (issue 2397)
-
-        Added atomics implementation for ThreadSanitizer v2.
-        (Chromium issue 128314)
-
-        Fixed LiveEdit crashes when object/array literal is added. (issue 2368)
-
-        Performance and stability improvements on all platforms.
-
-
-2012-11-13: Version 3.15.3
-
-        Changed sample shell to send non-JS output (e.g. errors) to stderr
-        instead of stdout.
-
-        Correctly check for stack overflow even when interrupt is pending.
-        (issue 214)
-
-        Collect stack trace on stack overflow. (issue 2394)
-
-        Performance and stability improvements on all platforms.
-
-
-2012-11-12: Version 3.15.2
-
-        Function::GetScriptOrigin supplies sourceURL when script name is
-        not available. (Chromium issue 159413)
-
-        Made formatting error message side-effect-free. (issue 2398)
-
-        Fixed length check in JSON.stringify. (Chromium issue 160010)
-
-        ES6: Added support for Set and Map clear method (issue 2400)
-
-        Fixed slack tracking when instance prototype changes.
-        (Chromium issue 157019)
-
-        Fixed disabling of code flusher while marking. (Chromium issue 159140)
-
-        Added a test case for object grouping in a scavenger GC (issue 2077)
-
-        Support shared library build of Android for v8.
-        (Chromium issue 158821)
-
-        ES6: Added support for size to Set and Map (issue 2395)
-
-        Performance and stability improvements on all platforms.
-
-
-2012-11-06: Version 3.15.1
-
-        Put incremental code flushing behind a flag. (Chromium issue 159140)
-
-        Performance and stability improvements on all platforms.
-
-
-2012-10-31: Version 3.15.0
-
-        Loosened aligned code target requirement on ARM (issue 2380)
-
-        Fixed JSON.parse to treat leading zeros correctly.
-        (Chromium issue 158185)
-
-        Performance and stability improvements on all platforms.
-
-
 2012-10-22: Version 3.14.5
 
         Killed off the SCons based build.
diff --git a/deps/v8/build/android.gypi b/deps/v8/build/android.gypi
index 67a9d35..d2d1a35 100644
--- a/deps/v8/build/android.gypi
+++ b/deps/v8/build/android.gypi
@@ -122,6 +122,8 @@
       'ldflags': [
         '-nostdlib',
        '-Wl,--no-undefined',
+        # Don't export symbols from statically linked libraries.
+        '-Wl,--exclude-libs=ALL',
      ],
      'libraries!': [
        '-lrt',  # librt is built into Bionic.
@@ -217,13 +219,6 @@
      ['_type=="shared_library"', {
        'ldflags': [
          '-Wl,-shared,-Bsymbolic',
-          '<(android_lib)/crtbegin_so.o',
        ],
      }],
-      ['_type=="static_library"', {
-        'ldflags': [
-          # Don't export symbols from statically linked libraries.
-          '-Wl,--exclude-libs=ALL',
-        ],
-      }],
    ],
diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi
index 44bebae..78888b8 100644
--- a/deps/v8/build/common.gypi
+++ b/deps/v8/build/common.gypi
@@ -70,6 +70,9 @@
     'v8_enable_disassembler%': 0,

+    # Enable extra checks in API functions and other strategic places.
+    'v8_enable_extra_checks%': 1,
+
     'v8_enable_gdbjit%': 0,

     'v8_object_print%': 0,

@@ -111,6 +114,9 @@
     ['v8_enable_disassembler==1', {
       'defines': ['ENABLE_DISASSEMBLER',],
     }],
+    ['v8_enable_extra_checks==1', {
+      'defines': ['ENABLE_EXTRA_CHECKS',],
+    }],
     ['v8_enable_gdbjit==1', {
       'defines': ['ENABLE_GDB_JIT_INTERFACE',],
     }],
@@ -128,11 +134,6 @@
         'V8_TARGET_ARCH_ARM',
       ],
       'conditions': [
-        ['armv7==1', {
-          'defines': [
-            'CAN_USE_ARMV7_INSTRUCTIONS=1',
-          ],
-        }],
        [ 'v8_can_use_unaligned_accesses=="true"', {
          'defines': [
            'CAN_USE_UNALIGNED_ACCESSES=1',
@@ -143,16 +144,12 @@
            'CAN_USE_UNALIGNED_ACCESSES=0',
          ],
        }],
-        # NEON implies VFP3 and VFP3 implies VFP2.
-        [ 'v8_can_use_vfp2_instructions=="true" or arm_neon==1 or \
-           arm_fpu=="vfpv3" or arm_fpu=="vfpv3-d16"', {
+        [ 'v8_can_use_vfp2_instructions=="true"', {
          'defines': [
            'CAN_USE_VFP2_INSTRUCTIONS',
          ],
        }],
-        # NEON implies VFP3.
-        [ 'v8_can_use_vfp3_instructions=="true" or arm_neon==1 or \
-           arm_fpu=="vfpv3" or arm_fpu=="vfpv3-d16"', {
+        [ 'v8_can_use_vfp3_instructions=="true"', {
          'defines': [
            'CAN_USE_VFP3_INSTRUCTIONS',
          ],
@@ -160,7 +157,7 @@
        [ 'v8_use_arm_eabi_hardfloat=="true"', {
          'defines': [
            'USE_EABI_HARDFLOAT=1',
-            'CAN_USE_VFP2_INSTRUCTIONS',
+            'CAN_USE_VFP3_INSTRUCTIONS',
          ],
          'target_conditions': [
            ['_toolset=="target"', {
@@ -203,11 +200,10 @@
          ['mips_arch_variant=="mips32r2"', {
            'cflags': ['-mips32r2', '-Wa,-mips32r2'],
          }],
-          ['mips_arch_variant=="mips32r1"', {
-            'cflags': ['-mips32', '-Wa,-mips32'],
-          }],
          ['mips_arch_variant=="loongson"', {
            'cflags': ['-mips3', '-Wa,-mips3'],
+          }, {
+            'cflags': ['-mips32', '-Wa,-mips32'],
          }],
        ],
      }],
@@ -334,9 +330,6 @@
  ],  # conditions
  'configurations': {
    'Debug': {
-      'variables': {
-        'v8_enable_extra_checks%': 1,
-      },
      'defines': [
        'DEBUG',
        'ENABLE_DISASSEMBLER',
@@ -361,9 +354,6 @@
        },
      },
      'conditions': [
-        ['v8_enable_extra_checks==1', {
-          'defines': ['ENABLE_EXTRA_CHECKS',],
-        }],
        ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
          'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
                      '-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
@@ -382,23 +372,21 @@
            }],
          ],
        }],
-        ['OS=="mac"', {
-          'xcode_settings': {
-            'GCC_OPTIMIZATION_LEVEL': '0',  # -O0
-          },
-        }],
      ],
    },  # Debug
    'Release': {
-      'variables': {
-        'v8_enable_extra_checks%': 0,
-      },
      'conditions': [
-        ['v8_enable_extra_checks==1', {
-          'defines': ['ENABLE_EXTRA_CHECKS',],
-        }],
        ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
          or OS=="android"', {
+          'cflags!': [
+            '-O2',
+            '-Os',
+          ],
+          'cflags': [
+            '-fdata-sections',
+            '-ffunction-sections',
+            '-O3',
+          ],
          'conditions': [
            [ 'gcc_version==44 and clang==0', {
              'cflags': [
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 4d3597a..c1e9a9e 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -407,28 +407,13 @@ class V8EXPORT HeapProfiler {
   static const SnapshotObjectId kUnknownObjectId = 0;
   /**
-   * Callback interface for retrieving user friendly names of global objects.
-   */
-  class ObjectNameResolver {
-   public:
-    /**
-     * Returns name to be used in the heap snapshot for given node. Returned
-     * string must stay alive until snapshot collection is completed.
-     */
-    virtual const char* GetName(Handle<Object> object) = 0;
-   protected:
-    virtual ~ObjectNameResolver() {}
-  };
-
-  /**
    * Takes a heap snapshot and returns it. Title may be an empty string.
    * See HeapSnapshot::Type for types description.
    */
   static const HeapSnapshot* TakeSnapshot(
       Handle<String> title,
       HeapSnapshot::Type type = HeapSnapshot::kFull,
-      ActivityControl* control = NULL,
-      ObjectNameResolver* global_object_name_resolver = NULL);
+      ActivityControl* control = NULL);

   /**
    * Starts tracking of heap objects population statistics.
After calling diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index f577e93..245dc5a 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -76,22 +76,6 @@ #endif // _WIN32 -#if defined(__GNUC__) && !defined(DEBUG) -#define V8_INLINE(declarator) inline __attribute__((always_inline)) declarator -#elif defined(_MSC_VER) && !defined(DEBUG) -#define V8_INLINE(declarator) __forceinline declarator -#else -#define V8_INLINE(declarator) inline declarator -#endif - -#if defined(__GNUC__) && !V8_DISABLE_DEPRECATIONS -#define V8_DEPRECATED(declarator) declarator __attribute__ ((deprecated)) -#elif defined(_MSC_VER) && !V8_DISABLE_DEPRECATIONS -#define V8_DEPRECATED(declarator) __declspec(deprecated) declarator -#else -#define V8_DEPRECATED(declarator) declarator -#endif - /** * The v8 JavaScript engine. */ @@ -192,12 +176,12 @@ template class Handle { /** * Creates an empty handle. */ - V8_INLINE(Handle()) : val_(0) {} + inline Handle() : val_(0) {} /** * Creates a new handle for the specified value. */ - V8_INLINE(explicit Handle(T* val)) : val_(val) {} + inline explicit Handle(T* val) : val_(val) {} /** * Creates a handle for the contents of the specified handle. This @@ -209,7 +193,7 @@ template class Handle { * Handle to a variable declared as Handle, is legal * because String is a subclass of Value. */ - template V8_INLINE(Handle(Handle that)) + template inline Handle(Handle that) : val_(reinterpret_cast(*that)) { /** * This check fails when trying to convert between incompatible @@ -222,16 +206,16 @@ template class Handle { /** * Returns true if the handle is empty. */ - V8_INLINE(bool IsEmpty() const) { return val_ == 0; } + inline bool IsEmpty() const { return val_ == 0; } /** * Sets the handle to be empty. IsEmpty() will then return true. */ - V8_INLINE(void Clear()) { val_ = 0; } + inline void Clear() { val_ = 0; } - V8_INLINE(T* operator->() const) { return val_; } + inline T* operator->() const { return val_; } - V8_INLINE(T* operator*() const) { return val_; } + inline T* operator*() const { return val_; } /** * Checks whether two handles are the same. @@ -239,7 +223,7 @@ template class Handle { * to which they refer are identical. * The handles' references are not checked. */ - template V8_INLINE(bool operator==(Handle that) const) { + template inline bool operator==(Handle that) const { internal::Object** a = reinterpret_cast(**this); internal::Object** b = reinterpret_cast(*that); if (a == 0) return b == 0; @@ -253,11 +237,11 @@ template class Handle { * the objects to which they refer are different. * The handles' references are not checked. */ - template V8_INLINE(bool operator!=(Handle that) const) { + template inline bool operator!=(Handle that) const { return !operator==(that); } - template V8_INLINE(static Handle Cast(Handle that)) { + template static inline Handle Cast(Handle that) { #ifdef V8_ENABLE_CHECKS // If we're going to perform the type check then we have to check // that the handle isn't empty before doing the checked cast. 
@@ -266,7 +250,7 @@ template class Handle { return Handle(T::Cast(*that)); } - template V8_INLINE(Handle As()) { + template inline Handle As() { return Handle::Cast(*this); } @@ -284,8 +268,8 @@ template class Handle { */ template class Local : public Handle { public: - V8_INLINE(Local()); - template V8_INLINE(Local(Local that)) + inline Local(); + template inline Local(Local that) : Handle(reinterpret_cast(*that)) { /** * This check fails when trying to convert between incompatible @@ -294,8 +278,8 @@ template class Local : public Handle { */ TYPE_CHECK(T, S); } - template V8_INLINE(Local(S* that) : Handle(that)) { } - template V8_INLINE(static Local Cast(Local that)) { + template inline Local(S* that) : Handle(that) { } + template static inline Local Cast(Local that) { #ifdef V8_ENABLE_CHECKS // If we're going to perform the type check then we have to check // that the handle isn't empty before doing the checked cast. @@ -304,17 +288,15 @@ template class Local : public Handle { return Local(T::Cast(*that)); } - template V8_INLINE(Local As()) { + template inline Local As() { return Local::Cast(*this); } - /** - * Create a local handle for the content of another handle. - * The referee is kept alive by the local handle even when - * the original handle is destroyed/disposed. + /** Create a local handle for the content of another handle. + * The referee is kept alive by the local handle even when + * the original handle is destroyed/disposed. */ - V8_INLINE(static Local New(Handle that)); - V8_INLINE(static Local New(Isolate* isolate, Handle that)); + inline static Local New(Handle that); }; @@ -341,7 +323,7 @@ template class Persistent : public Handle { * Creates an empty persistent handle that doesn't point to any * storage cell. */ - V8_INLINE(Persistent()); + inline Persistent(); /** * Creates a persistent handle for the same storage cell as the @@ -354,7 +336,7 @@ template class Persistent : public Handle { * Persistent to a variable declared as Persistent, * is allowed as String is a subclass of Value. */ - template V8_INLINE(Persistent(Persistent that)) + template inline Persistent(Persistent that) : Handle(reinterpret_cast(*that)) { /** * This check fails when trying to convert between incompatible @@ -364,16 +346,16 @@ template class Persistent : public Handle { TYPE_CHECK(T, S); } - template V8_INLINE(Persistent(S* that)) : Handle(that) { } + template inline Persistent(S* that) : Handle(that) { } /** * "Casts" a plain handle which is known to be a persistent handle * to a persistent handle. */ - template explicit V8_INLINE(Persistent(Handle that)) + template explicit inline Persistent(Handle that) : Handle(*that) { } - template V8_INLINE(static Persistent Cast(Persistent that)) { + template static inline Persistent Cast(Persistent that) { #ifdef V8_ENABLE_CHECKS // If we're going to perform the type check then we have to check // that the handle isn't empty before doing the checked cast. @@ -382,7 +364,7 @@ template class Persistent : public Handle { return Persistent(T::Cast(*that)); } - template V8_INLINE(Persistent As()) { + template inline Persistent As() { return Persistent::Cast(*this); } @@ -390,7 +372,7 @@ template class Persistent : public Handle { * Creates a new persistent handle for an existing local or * persistent handle. */ - V8_INLINE(static Persistent New(Handle that)); + inline static Persistent New(Handle that); /** * Releases the storage cell referenced by this persistent handle. 
@@ -398,8 +380,7 @@ template class Persistent : public Handle { * This handle's reference, and any other references to the storage * cell remain and IsEmpty will still return false. */ - V8_INLINE(void Dispose()); - V8_INLINE(void Dispose(Isolate* isolate)); + inline void Dispose(); /** * Make the reference to this object weak. When only weak handles @@ -407,13 +388,10 @@ template class Persistent : public Handle { * callback to the given V8::WeakReferenceCallback function, passing * it the object reference and the given parameters. */ - V8_INLINE(void MakeWeak(void* parameters, WeakReferenceCallback callback)); - V8_INLINE(void MakeWeak(Isolate* isolate, - void* parameters, - WeakReferenceCallback callback)); + inline void MakeWeak(void* parameters, WeakReferenceCallback callback); /** Clears the weak reference to this object. */ - V8_INLINE(void ClearWeak()); + inline void ClearWeak(); /** * Marks the reference to this object independent. Garbage collector @@ -422,42 +400,28 @@ template class Persistent : public Handle { * assume that it will be preceded by a global GC prologue callback * or followed by a global GC epilogue callback. */ - V8_INLINE(void MarkIndependent()); - V8_INLINE(void MarkIndependent(Isolate* isolate)); - - /** - * Marks the reference to this object partially dependent. Partially - * dependent handles only depend on other partially dependent handles and - * these dependencies are provided through object groups. It provides a way - * to build smaller object groups for young objects that represent only a - * subset of all external dependencies. This mark is automatically cleared - * after each garbage collection. - */ - V8_INLINE(void MarkPartiallyDependent()); - V8_INLINE(void MarkPartiallyDependent(Isolate* isolate)); + inline void MarkIndependent(); /** Returns true if this handle was previously marked as independent. */ - V8_INLINE(bool IsIndependent() const); - V8_INLINE(bool IsIndependent(Isolate* isolate) const); + inline bool IsIndependent() const; /** Checks if the handle holds the only reference to an object. */ - V8_INLINE(bool IsNearDeath() const); + inline bool IsNearDeath() const; /** Returns true if the handle's reference is weak. */ - V8_INLINE(bool IsWeak() const); - V8_INLINE(bool IsWeak(Isolate* isolate) const); + inline bool IsWeak() const; /** * Assigns a wrapper class ID to the handle. See RetainedObjectInfo * interface description in v8-profiler.h for details. */ - V8_INLINE(void SetWrapperClassId(uint16_t class_id)); + inline void SetWrapperClassId(uint16_t class_id); /** * Returns the class ID previously assigned to this handle or 0 if no class * ID was previously assigned. */ - V8_INLINE(uint16_t WrapperClassId() const); + inline uint16_t WrapperClassId() const; private: friend class ImplementationUtilities; @@ -500,14 +464,12 @@ class V8EXPORT HandleScope { * Creates a new handle with the given value. */ static internal::Object** CreateHandle(internal::Object* value); - static internal::Object** CreateHandle(internal::Isolate* isolate, - internal::Object* value); // Faster version, uses HeapObject to obtain the current Isolate. static internal::Object** CreateHandle(internal::HeapObject* value); private: - // Make it hard to create heap-allocated or illegal handle scopes by - // disallowing certain operations. + // Make it impossible to create heap-allocated or illegal handle + // scopes by disallowing certain operations. 
HandleScope(const HandleScope&); void operator=(const HandleScope&); void* operator new(size_t size); @@ -520,7 +482,7 @@ class V8EXPORT HandleScope { internal::Object** next; internal::Object** limit; int level; - V8_INLINE(void Initialize()) { + inline void Initialize() { next = limit = NULL; level = 0; } @@ -613,16 +575,16 @@ class V8EXPORT ScriptData { // NOLINT */ class ScriptOrigin { public: - V8_INLINE(ScriptOrigin( + inline ScriptOrigin( Handle resource_name, Handle resource_line_offset = Handle(), - Handle resource_column_offset = Handle())) + Handle resource_column_offset = Handle()) : resource_name_(resource_name), resource_line_offset_(resource_line_offset), resource_column_offset_(resource_column_offset) { } - V8_INLINE(Handle ResourceName() const); - V8_INLINE(Handle ResourceLineOffset() const); - V8_INLINE(Handle ResourceColumnOffset() const); + inline Handle ResourceName() const; + inline Handle ResourceLineOffset() const; + inline Handle ResourceColumnOffset() const; private: Handle resource_name_; Handle resource_line_offset_; @@ -910,13 +872,13 @@ class Value : public Data { * Returns true if this value is the undefined value. See ECMA-262 * 4.3.10. */ - V8_INLINE(bool IsUndefined() const); + inline bool IsUndefined() const; /** * Returns true if this value is the null value. See ECMA-262 * 4.3.11. */ - V8_INLINE(bool IsNull() const); + inline bool IsNull() const; /** * Returns true if this value is true. @@ -932,7 +894,7 @@ class Value : public Data { * Returns true if this value is an instance of the String type. * See ECMA-262 8.4. */ - V8_INLINE(bool IsString() const); + inline bool IsString() const; /** * Returns true if this value is a function. @@ -1030,9 +992,9 @@ class Value : public Data { V8EXPORT bool StrictEquals(Handle that) const; private: - V8_INLINE(bool QuickIsUndefined() const); - V8_INLINE(bool QuickIsNull() const); - V8_INLINE(bool QuickIsString() const); + inline bool QuickIsUndefined() const; + inline bool QuickIsNull() const; + inline bool QuickIsString() const; V8EXPORT bool FullIsUndefined() const; V8EXPORT bool FullIsNull() const; V8EXPORT bool FullIsString() const; @@ -1052,7 +1014,7 @@ class Primitive : public Value { }; class Boolean : public Primitive { public: V8EXPORT bool Value() const; - V8_INLINE(static Handle New(bool value)); + static inline Handle New(bool value); }; @@ -1137,7 +1099,7 @@ class String : public Primitive { * A zero length string. */ V8EXPORT static v8::Local Empty(); - V8_INLINE(static v8::Local Empty(Isolate* isolate)); + inline static v8::Local Empty(Isolate* isolate); /** * Returns true if the string is external @@ -1233,14 +1195,14 @@ class String : public Primitive { * regardless of the encoding, otherwise return NULL. The encoding of the * string is returned in encoding_out. */ - V8_INLINE(ExternalStringResourceBase* GetExternalStringResourceBase( - Encoding* encoding_out) const); + inline ExternalStringResourceBase* GetExternalStringResourceBase( + Encoding* encoding_out) const; /** * Get the ExternalStringResource for an external string. Returns * NULL if IsExternal() doesn't return true. */ - V8_INLINE(ExternalStringResource* GetExternalStringResource() const); + inline ExternalStringResource* GetExternalStringResource() const; /** * Get the ExternalAsciiStringResource for an external ASCII string. 
@@ -1249,7 +1211,7 @@ class String : public Primitive { V8EXPORT const ExternalAsciiStringResource* GetExternalAsciiStringResource() const; - V8_INLINE(static String* Cast(v8::Value* obj)); + static inline String* Cast(v8::Value* obj); /** * Allocates a new string from either UTF-8 encoded or ASCII data. @@ -1413,7 +1375,7 @@ class Number : public Primitive { public: V8EXPORT double Value() const; V8EXPORT static Local New(double value); - V8_INLINE(static Number* Cast(v8::Value* obj)); + static inline Number* Cast(v8::Value* obj); private: V8EXPORT Number(); V8EXPORT static void CheckCast(v8::Value* obj); @@ -1430,7 +1392,7 @@ class Integer : public Number { V8EXPORT static Local New(int32_t value, Isolate*); V8EXPORT static Local NewFromUnsigned(uint32_t value, Isolate*); V8EXPORT int64_t Value() const; - V8_INLINE(static Integer* Cast(v8::Value* obj)); + static inline Integer* Cast(v8::Value* obj); private: V8EXPORT Integer(); V8EXPORT static void CheckCast(v8::Value* obj); @@ -1625,42 +1587,16 @@ class Object : public Value { /** Gets the number of internal fields for this Object. */ V8EXPORT int InternalFieldCount(); - - /** Gets the value from an internal field. */ - V8_INLINE(Local GetInternalField(int index)); - + /** Gets the value in an internal field. */ + inline Local GetInternalField(int index); /** Sets the value in an internal field. */ V8EXPORT void SetInternalField(int index, Handle value); - /** - * Gets a native pointer from an internal field. Deprecated. If the pointer is - * always 2-byte-aligned, use GetAlignedPointerFromInternalField instead, - * otherwise use a combination of GetInternalField, External::Cast and - * External::Value. - */ - V8EXPORT V8_DEPRECATED(void* GetPointerFromInternalField(int index)); - - /** - * Sets a native pointer in an internal field. Deprecated. If the pointer is - * always 2-byte aligned, use SetAlignedPointerInInternalField instead, - * otherwise use a combination of External::New and SetInternalField. - */ - V8_DEPRECATED(V8_INLINE(void SetPointerInInternalField(int index, - void* value))); - - /** - * Gets a 2-byte-aligned native pointer from an internal field. This field - * must have been set by SetAlignedPointerInInternalField, everything else - * leads to undefined behavior. - */ - V8_INLINE(void* GetAlignedPointerFromInternalField(int index)); + /** Gets a native pointer from an internal field. */ + inline void* GetPointerFromInternalField(int index); - /** - * Sets a 2-byte-aligned native pointer in an internal field. To retrieve such - * a field, GetAlignedPointerFromInternalField must be used, everything else - * leads to undefined behavior. - */ - V8EXPORT void SetAlignedPointerInInternalField(int index, void* value); + /** Sets a native pointer in an internal field. */ + V8EXPORT void SetPointerInInternalField(int index, void* value); // Testers for local properties. V8EXPORT bool HasOwnProperty(Handle key); @@ -1786,13 +1722,19 @@ class Object : public Value { Handle argv[]); V8EXPORT static Local New(); - V8_INLINE(static Object* Cast(Value* obj)); + static inline Object* Cast(Value* obj); private: V8EXPORT Object(); V8EXPORT static void CheckCast(Value* obj); - V8EXPORT Local SlowGetInternalField(int index); - V8EXPORT void* SlowGetAlignedPointerFromInternalField(int index); + V8EXPORT Local CheckedGetInternalField(int index); + V8EXPORT void* SlowGetPointerFromInternalField(int index); + + /** + * If quick access to the internal field is possible this method + * returns the value. Otherwise an empty handle is returned. 
+ */ + inline Local UncheckedGetInternalField(int index); }; @@ -1815,7 +1757,7 @@ class Array : public Object { */ V8EXPORT static Local New(int length = 0); - V8_INLINE(static Array* Cast(Value* obj)); + static inline Array* Cast(Value* obj); private: V8EXPORT Array(); V8EXPORT static void CheckCast(Value* obj); @@ -1855,7 +1797,7 @@ class Function : public Object { V8EXPORT int GetScriptColumnNumber() const; V8EXPORT Handle GetScriptId() const; V8EXPORT ScriptOrigin GetScriptOrigin() const; - V8_INLINE(static Function* Cast(Value* obj)); + static inline Function* Cast(Value* obj); V8EXPORT static const int kLineOffsetNotFound; private: @@ -1877,7 +1819,7 @@ class Date : public Object { */ V8EXPORT double NumberValue() const; - V8_INLINE(static Date* Cast(v8::Value* obj)); + static inline Date* Cast(v8::Value* obj); /** * Notification that the embedder has changed the time zone, @@ -1910,7 +1852,7 @@ class NumberObject : public Object { */ V8EXPORT double NumberValue() const; - V8_INLINE(static NumberObject* Cast(v8::Value* obj)); + static inline NumberObject* Cast(v8::Value* obj); private: V8EXPORT static void CheckCast(v8::Value* obj); @@ -1929,7 +1871,7 @@ class BooleanObject : public Object { */ V8EXPORT bool BooleanValue() const; - V8_INLINE(static BooleanObject* Cast(v8::Value* obj)); + static inline BooleanObject* Cast(v8::Value* obj); private: V8EXPORT static void CheckCast(v8::Value* obj); @@ -1948,7 +1890,7 @@ class StringObject : public Object { */ V8EXPORT Local StringValue() const; - V8_INLINE(static StringObject* Cast(v8::Value* obj)); + static inline StringObject* Cast(v8::Value* obj); private: V8EXPORT static void CheckCast(v8::Value* obj); @@ -1995,7 +1937,7 @@ class RegExp : public Object { */ V8EXPORT Flags GetFlags() const; - V8_INLINE(static RegExp* Cast(v8::Value* obj)); + static inline RegExp* Cast(v8::Value* obj); private: V8EXPORT static void CheckCast(v8::Value* obj); @@ -2003,22 +1945,29 @@ class RegExp : public Object { /** - * A JavaScript value that wraps a C++ void*. This type of value is mainly used - * to associate C++ data structures with JavaScript objects. + * A JavaScript value that wraps a C++ void*. This type of value is + * mainly used to associate C++ data structures with JavaScript + * objects. + * + * The Wrap function V8 will return the most optimal Value object wrapping the + * C++ void*. The type of the value is not guaranteed to be an External object + * and no assumptions about its type should be made. To access the wrapped + * value Unwrap should be used, all other operations on that object will lead + * to unpredictable results. */ class External : public Value { public: - /** Deprecated, use New instead. */ - V8_DEPRECATED(V8_INLINE(static Local Wrap(void* value))); - - /** Deprecated, use a combination of Cast and Value instead. 
*/ - V8_DEPRECATED(V8_INLINE(static void* Unwrap(Handle obj))); + V8EXPORT static Local Wrap(void* data); + static inline void* Unwrap(Handle obj); V8EXPORT static Local New(void* value); - V8_INLINE(static External* Cast(Value* obj)); + static inline External* Cast(Value* obj); V8EXPORT void* Value() const; private: + V8EXPORT External(); V8EXPORT static void CheckCast(v8::Value* obj); + static inline void* QuickUnwrap(Handle obj); + V8EXPORT static void* FullUnwrap(Handle obj); }; @@ -2033,7 +1982,7 @@ class V8EXPORT Template : public Data { /** Adds a property to each instance created by this template.*/ void Set(Handle name, Handle value, PropertyAttribute attributes = None); - V8_INLINE(void Set(const char* name, Handle value)); + inline void Set(const char* name, Handle value); private: Template(); @@ -2050,14 +1999,14 @@ class V8EXPORT Template : public Data { */ class Arguments { public: - V8_INLINE(int Length() const); - V8_INLINE(Local operator[](int i) const); - V8_INLINE(Local Callee() const); - V8_INLINE(Local This() const); - V8_INLINE(Local Holder() const); - V8_INLINE(bool IsConstructCall() const); - V8_INLINE(Local Data() const); - V8_INLINE(Isolate* GetIsolate() const); + inline int Length() const; + inline Local operator[](int i) const; + inline Local Callee() const; + inline Local This() const; + inline Local Holder() const; + inline bool IsConstructCall() const; + inline Local Data() const; + inline Isolate* GetIsolate() const; private: static const int kIsolateIndex = 0; @@ -2066,10 +2015,10 @@ class Arguments { static const int kHolderIndex = -3; friend class ImplementationUtilities; - V8_INLINE(Arguments(internal::Object** implicit_args, + inline Arguments(internal::Object** implicit_args, internal::Object** values, int length, - bool is_construct_call)); + bool is_construct_call); internal::Object** implicit_args_; internal::Object** values_; int length_; @@ -2083,12 +2032,12 @@ class Arguments { */ class V8EXPORT AccessorInfo { public: - V8_INLINE(AccessorInfo(internal::Object** args)) + inline AccessorInfo(internal::Object** args) : args_(args) { } - V8_INLINE(Isolate* GetIsolate() const); - V8_INLINE(Local Data() const); - V8_INLINE(Local This() const); - V8_INLINE(Local Holder() const); + inline Isolate* GetIsolate() const; + inline Local Data() const; + inline Local This() const; + inline Local Holder() const; private: internal::Object** args_; @@ -2653,7 +2602,7 @@ void V8EXPORT RegisterExtension(Extension* extension); */ class V8EXPORT DeclareExtension { public: - V8_INLINE(DeclareExtension(Extension* extension)) { + inline DeclareExtension(Extension* extension) { RegisterExtension(extension); } }; @@ -2667,10 +2616,10 @@ Handle V8EXPORT Null(); Handle V8EXPORT True(); Handle V8EXPORT False(); -V8_INLINE(Handle Undefined(Isolate* isolate)); -V8_INLINE(Handle Null(Isolate* isolate)); -V8_INLINE(Handle True(Isolate* isolate)); -V8_INLINE(Handle False(Isolate* isolate)); +inline Handle Undefined(Isolate* isolate); +inline Handle Null(Isolate* isolate); +inline Handle True(Isolate* isolate); +inline Handle False(Isolate* isolate); /** @@ -2824,7 +2773,6 @@ class V8EXPORT HeapStatistics { HeapStatistics(); size_t total_heap_size() { return total_heap_size_; } size_t total_heap_size_executable() { return total_heap_size_executable_; } - size_t total_physical_size() { return total_physical_size_; } size_t used_heap_size() { return used_heap_size_; } size_t heap_size_limit() { return heap_size_limit_; } @@ -2833,15 +2781,11 @@ class V8EXPORT HeapStatistics { void 
set_total_heap_size_executable(size_t size) { total_heap_size_executable_ = size; } - void set_total_physical_size(size_t size) { - total_physical_size_ = size; - } void set_used_heap_size(size_t size) { used_heap_size_ = size; } void set_heap_size_limit(size_t size) { heap_size_limit_ = size; } size_t total_heap_size_; size_t total_heap_size_executable_; - size_t total_physical_size_; size_t used_heap_size_; size_t heap_size_limit_; @@ -2927,13 +2871,13 @@ class V8EXPORT Isolate { /** * Associate embedder-specific data with the isolate */ - V8_INLINE(void SetData(void* data)); + inline void SetData(void* data); /** * Retrieve embedder-specific data from the isolate. * Returns NULL if SetData has never been called. */ - V8_INLINE(void* GetData()); + inline void* GetData(); private: Isolate(); @@ -3205,6 +3149,12 @@ class V8EXPORT V8 { static void SetCreateHistogramFunction(CreateHistogramCallback); static void SetAddHistogramSampleFunction(AddHistogramSampleCallback); + /** + * Enables the computation of a sliding window of states. The sliding + * window information is recorded in statistics counters. + */ + static void EnableSlidingStateWindow(); + /** Callback function for reporting failed access checks.*/ static void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback); @@ -3299,19 +3249,12 @@ class V8EXPORT V8 { * After each garbage collection, object groups are removed. It is * intended to be used in the before-garbage-collection callback * function, for instance to simulate DOM tree connections among JS - * wrapper objects. Object groups for all dependent handles need to - * be provided for kGCTypeMarkSweepCompact collections, for all other - * garbage collection types it is sufficient to provide object groups - * for partially dependent handles only. + * wrapper objects. * See v8-profiler.h for RetainedObjectInfo interface description. */ static void AddObjectGroup(Persistent* objects, size_t length, RetainedObjectInfo* info = NULL); - static void AddObjectGroup(Isolate* isolate, - Persistent* objects, - size_t length, - RetainedObjectInfo* info = NULL); /** * Allows the host application to declare implicit references between @@ -3496,8 +3439,8 @@ class V8EXPORT V8 { /** * Iterates through all external resources referenced from current isolate - * heap. GC is not invoked prior to iterating, therefore there is no - * guarantee that visited objects are still alive. + * heap. This method is not expected to be used except for debugging purposes + * and may be quite slow. 
*/ static void VisitExternalResources(ExternalResourceVisitor* visitor); @@ -3540,29 +3483,14 @@ class V8EXPORT V8 { static internal::Object** GlobalizeReference(internal::Object** handle); static void DisposeGlobal(internal::Object** global_handle); - static void DisposeGlobal(internal::Isolate* isolate, - internal::Object** global_handle); static void MakeWeak(internal::Object** global_handle, void* data, WeakReferenceCallback); - static void MakeWeak(internal::Isolate* isolate, - internal::Object** global_handle, - void* data, - WeakReferenceCallback); static void ClearWeak(internal::Object** global_handle); static void MarkIndependent(internal::Object** global_handle); - static void MarkIndependent(internal::Isolate* isolate, - internal::Object** global_handle); - static void MarkPartiallyDependent(internal::Object** global_handle); - static void MarkPartiallyDependent(internal::Isolate* isolate, - internal::Object** global_handle); static bool IsGlobalIndependent(internal::Object** global_handle); - static bool IsGlobalIndependent(internal::Isolate* isolate, - internal::Object** global_handle); static bool IsGlobalNearDeath(internal::Object** global_handle); static bool IsGlobalWeak(internal::Object** global_handle); - static bool IsGlobalWeak(internal::Isolate* isolate, - internal::Object** global_handle); static void SetWrapperClassId(internal::Object** global_handle, uint16_t class_id); static uint16_t GetWrapperClassId(internal::Object** global_handle); @@ -3580,9 +3508,7 @@ class V8EXPORT V8 { class V8EXPORT TryCatch { public: /** - * Creates a new try/catch block and registers it with v8. Note that - * all TryCatch blocks should be stack allocated because the memory - * location itself is compared against JavaScript try/catch blocks. + * Creates a new try/catch block and registers it with v8. */ TryCatch(); @@ -3672,12 +3598,6 @@ class V8EXPORT TryCatch { void SetCaptureMessage(bool value); private: - // Make it hard to create heap-allocated TryCatch blocks. - TryCatch(const TryCatch&); - void operator=(const TryCatch&); - void* operator new(size_t size); - void operator delete(void*, size_t); - v8::internal::Isolate* isolate_; void* next_; void* exception_; @@ -3819,45 +3739,12 @@ class V8EXPORT Context { static bool InContext(); /** - * Gets embedder data with index 0. Deprecated, use GetEmbedderData with index - * 0 instead. + * Associate an additional data object with the context. This is mainly used + * with the debugger to provide additional information on the context through + * the debugger API. */ - V8_DEPRECATED(V8_INLINE(Local GetData())); - - /** - * Sets embedder data with index 0. Deprecated, use SetEmbedderData with index - * 0 instead. - */ - V8_DEPRECATED(V8_INLINE(void SetData(Handle value))); - - /** - * Gets the embedder data with the given index, which must have been set by a - * previous call to SetEmbedderData with the same index. Note that index 0 - * currently has a special meaning for Chrome's debugger. - */ - V8_INLINE(Local GetEmbedderData(int index)); - - /** - * Sets the embedder data with the given index, growing the data as - * needed. Note that index 0 currently has a special meaning for Chrome's - * debugger. - */ - void SetEmbedderData(int index, Handle value); - - /** - * Gets a 2-byte-aligned native pointer from the embedder data with the given - * index, which must have bees set by a previous call to - * SetAlignedPointerInEmbedderData with the same index. Note that index 0 - * currently has a special meaning for Chrome's debugger. 
- */ - V8_INLINE(void* GetAlignedPointerFromEmbedderData(int index)); - - /** - * Sets a 2-byte-aligned native pointer in the embedder data with the given - * index, growing the data as needed. Note that index 0 currently has a - * special meaning for Chrome's debugger. - */ - void SetAlignedPointerInEmbedderData(int index, void* value); + void SetData(Handle data); + Local GetData(); /** * Control whether code generation from strings is allowed. Calling @@ -3893,10 +3780,10 @@ class V8EXPORT Context { */ class Scope { public: - explicit V8_INLINE(Scope(Handle context)) : context_(context) { + explicit inline Scope(Handle context) : context_(context) { context_->Enter(); } - V8_INLINE(~Scope()) { context_->Exit(); } + inline ~Scope() { context_->Exit(); } private: Handle context_; }; @@ -3906,9 +3793,6 @@ class V8EXPORT Context { friend class Script; friend class Object; friend class Function; - - Local SlowGetEmbedderData(int index); - void* SlowGetAlignedPointerFromEmbedderData(int index); }; @@ -4137,27 +4021,47 @@ template struct SmiTagging; template <> struct SmiTagging<4> { static const int kSmiShiftSize = 0; static const int kSmiValueSize = 31; - V8_INLINE(static int SmiToInt(internal::Object* value)) { + static inline int SmiToInt(internal::Object* value) { int shift_bits = kSmiTagSize + kSmiShiftSize; // Throw away top 32 bits and shift down (requires >> to be sign extending). return static_cast(reinterpret_cast(value)) >> shift_bits; } + + // For 32-bit systems any 2-byte-aligned pointer can be encoded as a smi + // with a plain reinterpret_cast. + static const uintptr_t kEncodablePointerMask = 0x1; + static const int kPointerToSmiShift = 0; }; // Smi constants for 64-bit systems. template <> struct SmiTagging<8> { static const int kSmiShiftSize = 31; static const int kSmiValueSize = 32; - V8_INLINE(static int SmiToInt(internal::Object* value)) { + static inline int SmiToInt(internal::Object* value) { int shift_bits = kSmiTagSize + kSmiShiftSize; // Shift down and throw away top 32 bits. return static_cast(reinterpret_cast(value) >> shift_bits); } + + // To maximize the range of pointers that can be encoded + // in the available 32 bits, we require them to be 8-byte aligned. + // This gives 2 ^ (32 + 3) = 32G address space covered. + // It might not be enough to cover stack allocated objects on some platforms.
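Worked through with the 64-bit numbers above (`kSmiTagSize` is 1 and `kSmiShiftSize` is 31, so the shift comes out to 1 + 31 - 3 = 29), the scheme the constants below implement can be sanity-checked standalone. The names here are local to the sketch, not V8's:

```cpp
#include <cassert>
#include <stdint.h>

// Standalone sketch of the 64-bit pointer-as-smi encoding described above.
static const int kPointerAlignment = 3;  // 8-byte aligned
static const uint64_t kEncodableMask =
    ~(static_cast<uint64_t>(0xffffffff) << kPointerAlignment);
static const int kPointerToSmiShift = 1 + 31 - kPointerAlignment;  // == 29

// Encodable: low 3 bits clear and nothing set at bit 35 or above,
// i.e. an 8-byte-aligned address inside the 2^35 = 32G window.
bool CanEncode(uint64_t address) { return (address & kEncodableMask) == 0; }

uint64_t Encode(uint64_t address) { return address << kPointerToSmiShift; }
uint64_t Decode(uint64_t smi_bits) { return smi_bits >> kPointerToSmiShift; }

int main() {
  uint64_t p = 0x12345678ULL & ~static_cast<uint64_t>(7);  // aligned, < 2^35
  assert(CanEncode(p));
  assert(Decode(Encode(p)) == p);  // round-trips losslessly
  assert(!CanEncode(p | 1));       // misaligned pointers are rejected
  return 0;
}
```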
+ static const int kPointerAlignment = 3; + + static const uintptr_t kEncodablePointerMask = + ~(uintptr_t(0xffffffff) << kPointerAlignment); + + static const int kPointerToSmiShift = + kSmiTagSize + kSmiShiftSize - kPointerAlignment; }; typedef SmiTagging PlatformSmiTagging; const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize; const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize; +const uintptr_t kEncodablePointerMask = + PlatformSmiTagging::kEncodablePointerMask; +const int kPointerToSmiShift = PlatformSmiTagging::kPointerToSmiShift; /** * This class exports constants and functionality from within v8 that @@ -4175,9 +4079,6 @@ class Internals { static const int kOddballKindOffset = 3 * kApiPointerSize; static const int kForeignAddressOffset = kApiPointerSize; static const int kJSObjectHeaderSize = 3 * kApiPointerSize; - static const int kFixedArrayHeaderSize = 2 * kApiPointerSize; - static const int kContextHeaderSize = 2 * kApiPointerSize; - static const int kContextEmbedderDataIndex = 54; static const int kFullStringRepresentationMask = 0x07; static const int kStringEncodingMask = 0x4; static const int kExternalTwoByteRepresentationTag = 0x02; @@ -4190,7 +4091,7 @@ class Internals { static const int kNullValueRootIndex = 7; static const int kTrueValueRootIndex = 8; static const int kFalseValueRootIndex = 9; - static const int kEmptySymbolRootIndex = 119; + static const int kEmptySymbolRootIndex = 117; static const int kJSObjectType = 0xaa; static const int kFirstNonstringType = 0x80; @@ -4200,80 +4101,85 @@ class Internals { static const int kUndefinedOddballKind = 5; static const int kNullOddballKind = 3; - V8_INLINE(static bool HasHeapObjectTag(internal::Object* value)) { + static inline bool HasHeapObjectTag(internal::Object* value) { return ((reinterpret_cast(value) & kHeapObjectTagMask) == kHeapObjectTag); } - V8_INLINE(static int SmiValue(internal::Object* value)) { + static inline bool HasSmiTag(internal::Object* value) { + return ((reinterpret_cast(value) & kSmiTagMask) == kSmiTag); + } + + static inline int SmiValue(internal::Object* value) { return PlatformSmiTagging::SmiToInt(value); } - V8_INLINE(static int GetInstanceType(internal::Object* obj)) { + static inline int GetInstanceType(internal::Object* obj) { typedef internal::Object O; O* map = ReadField(obj, kHeapObjectMapOffset); return ReadField(map, kMapInstanceTypeOffset); } - V8_INLINE(static int GetOddballKind(internal::Object* obj)) { + static inline int GetOddballKind(internal::Object* obj) { typedef internal::Object O; return SmiValue(ReadField(obj, kOddballKindOffset)); } - V8_INLINE(static bool IsExternalTwoByteString(int instance_type)) { + static inline void* GetExternalPointerFromSmi(internal::Object* value) { + const uintptr_t address = reinterpret_cast(value); + return reinterpret_cast(address >> kPointerToSmiShift); + } + + static inline void* GetExternalPointer(internal::Object* obj) { + if (HasSmiTag(obj)) { + return GetExternalPointerFromSmi(obj); + } else if (GetInstanceType(obj) == kForeignType) { + return ReadField(obj, kForeignAddressOffset); + } else { + return NULL; + } + } + + static inline bool IsExternalTwoByteString(int instance_type) { int representation = (instance_type & kFullStringRepresentationMask); return representation == kExternalTwoByteRepresentationTag; } - V8_INLINE(static bool IsInitialized(v8::Isolate* isolate)) { + static inline bool IsInitialized(v8::Isolate* isolate) { uint8_t* addr = reinterpret_cast(isolate) + kIsolateStateOffset; return 
*reinterpret_cast(addr) == 1; } - V8_INLINE(static void SetEmbedderData(v8::Isolate* isolate, void* data)) { + static inline void SetEmbedderData(v8::Isolate* isolate, void* data) { uint8_t* addr = reinterpret_cast(isolate) + kIsolateEmbedderDataOffset; *reinterpret_cast(addr) = data; } - V8_INLINE(static void* GetEmbedderData(v8::Isolate* isolate)) { + static inline void* GetEmbedderData(v8::Isolate* isolate) { uint8_t* addr = reinterpret_cast(isolate) + kIsolateEmbedderDataOffset; return *reinterpret_cast(addr); } - V8_INLINE(static internal::Object** GetRoot(v8::Isolate* isolate, - int index)) { + static inline internal::Object** GetRoot(v8::Isolate* isolate, int index) { uint8_t* addr = reinterpret_cast(isolate) + kIsolateRootsOffset; return reinterpret_cast(addr + index * kApiPointerSize); } template - V8_INLINE(static T ReadField(Object* ptr, int offset)) { + static inline T ReadField(Object* ptr, int offset) { uint8_t* addr = reinterpret_cast(ptr) + offset - kHeapObjectTag; return *reinterpret_cast(addr); } - template - V8_INLINE(static T ReadEmbedderData(Context* context, int index)) { - typedef internal::Object O; - typedef internal::Internals I; - O* ctx = *reinterpret_cast(context); - int embedder_data_offset = I::kContextHeaderSize + - (internal::kApiPointerSize * I::kContextEmbedderDataIndex); - O* embedder_data = I::ReadField(ctx, embedder_data_offset); - int value_offset = - I::kFixedArrayHeaderSize + (internal::kApiPointerSize * index); - return I::ReadField(embedder_data, value_offset); - } - - V8_INLINE(static bool CanCastToHeapObject(void* o)) { return false; } - V8_INLINE(static bool CanCastToHeapObject(Context* o)) { return true; } - V8_INLINE(static bool CanCastToHeapObject(String* o)) { return true; } - V8_INLINE(static bool CanCastToHeapObject(Object* o)) { return true; } - V8_INLINE(static bool CanCastToHeapObject(Message* o)) { return true; } - V8_INLINE(static bool CanCastToHeapObject(StackTrace* o)) { return true; } - V8_INLINE(static bool CanCastToHeapObject(StackFrame* o)) { return true; } + static inline bool CanCastToHeapObject(void* o) { return false; } + static inline bool CanCastToHeapObject(Context* o) { return true; } + static inline bool CanCastToHeapObject(String* o) { return true; } + static inline bool CanCastToHeapObject(Object* o) { return true; } + static inline bool CanCastToHeapObject(Message* o) { return true; } + static inline bool CanCastToHeapObject(StackTrace* o) { return true; } + static inline bool CanCastToHeapObject(StackFrame* o) { return true; } }; } // namespace internal @@ -4297,16 +4203,6 @@ Local Local::New(Handle that) { template - Local Local::New(Isolate* isolate, Handle that) { - if (that.IsEmpty()) return Local(); - T* that_ptr = *that; - internal::Object** p = reinterpret_cast(that_ptr); - return Local(reinterpret_cast(HandleScope::CreateHandle( - reinterpret_cast(isolate), *p))); -} - - -template Persistent Persistent::New(Handle that) { if (that.IsEmpty()) return Persistent(); internal::Object** p = reinterpret_cast(*that); @@ -4322,14 +4218,6 @@ bool Persistent::IsIndependent() const { template -bool Persistent::IsIndependent(Isolate* isolate) const { - if (this->IsEmpty()) return false; - return V8::IsGlobalIndependent(reinterpret_cast(isolate), - reinterpret_cast(**this)); -} - - -template bool Persistent::IsNearDeath() const { if (this->IsEmpty()) return false; return V8::IsGlobalNearDeath(reinterpret_cast(**this)); @@ -4344,14 +4232,6 @@ bool Persistent::IsWeak() const { template -bool Persistent::IsWeak(Isolate* 
isolate) const { - if (this->IsEmpty()) return false; - return V8::IsGlobalWeak(reinterpret_cast(isolate), - reinterpret_cast(**this)); -} - - -template void Persistent::Dispose() { if (this->IsEmpty()) return; V8::DisposeGlobal(reinterpret_cast(**this)); @@ -4359,14 +4239,6 @@ void Persistent::Dispose() { template -void Persistent::Dispose(Isolate* isolate) { - if (this->IsEmpty()) return; - V8::DisposeGlobal(reinterpret_cast(isolate), - reinterpret_cast(**this)); -} - - -template Persistent::Persistent() : Handle() { } template @@ -4377,15 +4249,6 @@ void Persistent::MakeWeak(void* parameters, WeakReferenceCallback callback) { } template -void Persistent::MakeWeak(Isolate* isolate, void* parameters, - WeakReferenceCallback callback) { - V8::MakeWeak(reinterpret_cast(isolate), - reinterpret_cast(**this), - parameters, - callback); -} - -template void Persistent::ClearWeak() { V8::ClearWeak(reinterpret_cast(**this)); } @@ -4396,23 +4259,6 @@ void Persistent::MarkIndependent() { } template -void Persistent::MarkIndependent(Isolate* isolate) { - V8::MarkIndependent(reinterpret_cast(isolate), - reinterpret_cast(**this)); -} - -template -void Persistent::MarkPartiallyDependent() { - V8::MarkPartiallyDependent(reinterpret_cast(**this)); -} - -template -void Persistent::MarkPartiallyDependent(Isolate* isolate) { - V8::MarkPartiallyDependent(reinterpret_cast(isolate), - reinterpret_cast(**this)); -} - -template void Persistent::SetWrapperClassId(uint16_t class_id) { V8::SetWrapperClassId(reinterpret_cast(**this), class_id); } @@ -4508,40 +4354,63 @@ void Template::Set(const char* name, v8::Handle value) { Local Object::GetInternalField(int index) { #ifndef V8_ENABLE_CHECKS + Local quick_result = UncheckedGetInternalField(index); + if (!quick_result.IsEmpty()) return quick_result; +#endif + return CheckedGetInternalField(index); +} + + +Local Object::UncheckedGetInternalField(int index) { typedef internal::Object O; typedef internal::Internals I; O* obj = *reinterpret_cast(this); - // Fast path: If the object is a plain JSObject, which is the common case, we - // know where to find the internal fields and can return the value directly. if (I::GetInstanceType(obj) == I::kJSObjectType) { + // If the object is a plain JSObject, which is the common case, + // we know where to find the internal fields and can return the + // value directly. int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index); O* value = I::ReadField(obj, offset); O** result = HandleScope::CreateHandle(value); return Local(reinterpret_cast(result)); + } else { + return Local(); } +} + + +void* External::Unwrap(Handle obj) { +#ifdef V8_ENABLE_CHECKS + return FullUnwrap(obj); +#else + return QuickUnwrap(obj); #endif - return SlowGetInternalField(index); } -void Object::SetPointerInInternalField(int index, void* value) { - SetInternalField(index, External::New(value)); +void* External::QuickUnwrap(Handle wrapper) { + typedef internal::Object O; + O* obj = *reinterpret_cast(const_cast(*wrapper)); + return internal::Internals::GetExternalPointer(obj); } -void* Object::GetAlignedPointerFromInternalField(int index) { -#ifndef V8_ENABLE_CHECKS +void* Object::GetPointerFromInternalField(int index) { typedef internal::Object O; typedef internal::Internals I; + O* obj = *reinterpret_cast(this); - // Fast path: If the object is a plain JSObject, which is the common case, we - // know where to find the internal fields and can return the value directly. 
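The fast path that follows is what makes the classic wrap/unwrap pattern cheap. A minimal embedder-side sketch of the round trip it serves (a live HandleScope and an entered context are assumed; error checks elided):

```cpp
#include <v8.h>

// Sketch of wrapping a native pointer in a JS object via internal field 0,
// using the pre-isolate API restored by this patch.
struct Wrapped { int payload; };

v8::Local<v8::Object> Wrap(Wrapped* native) {
  v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
  templ->SetInternalFieldCount(1);            // reserve internal field 0
  v8::Local<v8::Object> obj = templ->NewInstance();
  obj->SetPointerInInternalField(0, native);  // smi-encoded when it fits
  return obj;
}

Wrapped* Unwrap(v8::Handle<v8::Object> obj) {
  return static_cast<Wrapped*>(obj->GetPointerFromInternalField(0));
}
```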
+ if (I::GetInstanceType(obj) == I::kJSObjectType) { + // If the object is a plain JSObject, which is the common case, + // we know where to find the internal fields and can return the + // value directly. int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index); - return I::ReadField(obj, offset); + O* value = I::ReadField(obj, offset); + return I::GetExternalPointer(value); } -#endif - return SlowGetAlignedPointerFromInternalField(index); + + return SlowGetPointerFromInternalField(index); } @@ -4733,16 +4602,6 @@ Function* Function::Cast(v8::Value* value) { } -Local External::Wrap(void* value) { - return External::New(value); -} - - -void* External::Unwrap(Handle obj) { - return External::Cast(*obj)->Value(); -} - - External* External::Cast(v8::Value* value) { #ifdef V8_ENABLE_CHECKS CheckCast(value); @@ -4819,37 +4678,6 @@ void* Isolate::GetData() { } -Local Context::GetData() { - return GetEmbedderData(0); -} - -void Context::SetData(Handle data) { - SetEmbedderData(0, data); -} - - -Local Context::GetEmbedderData(int index) { -#ifndef V8_ENABLE_CHECKS - typedef internal::Object O; - typedef internal::Internals I; - O** result = HandleScope::CreateHandle(I::ReadEmbedderData(this, index)); - return Local(reinterpret_cast(result)); -#else - return SlowGetEmbedderData(index); -#endif -} - - -void* Context::GetAlignedPointerFromEmbedderData(int index) { -#ifndef V8_ENABLE_CHECKS - typedef internal::Internals I; - return I::ReadEmbedderData(this, index); -#else - return SlowGetAlignedPointerFromEmbedderData(index); -#endif -} - - /** * \example shell.cc * A simple shell that takes a list of expressions on the diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc index 62f4045..821ef75 100644 --- a/deps/v8/samples/shell.cc +++ b/deps/v8/samples/shell.cc @@ -72,7 +72,7 @@ int main(int argc, char* argv[]) { v8::HandleScope handle_scope; v8::Persistent context = CreateShellContext(); if (context.IsEmpty()) { - fprintf(stderr, "Error creating context\n"); + printf("Error creating context\n"); return 1; } context->Enter(); @@ -226,8 +226,7 @@ int RunMain(int argc, char* argv[]) { // alone JavaScript engines. continue; } else if (strncmp(str, "--", 2) == 0) { - fprintf(stderr, - "Warning: unknown flag %s.\nTry --help for options\n", str); + printf("Warning: unknown flag %s.\nTry --help for options\n", str); } else if (strcmp(str, "-e") == 0 && i + 1 < argc) { // Execute argument given to -e option directly. v8::Handle file_name = v8::String::New("unnamed"); @@ -238,7 +237,7 @@ int RunMain(int argc, char* argv[]) { v8::Handle file_name = v8::String::New(str); v8::Handle source = ReadFile(str); if (source.IsEmpty()) { - fprintf(stderr, "Error reading '%s'\n", str); + printf("Error reading '%s'\n", str); continue; } if (!ExecuteString(source, file_name, false, true)) return 1; @@ -250,20 +249,20 @@ int RunMain(int argc, char* argv[]) { // The read-eval-execute loop of the shell. void RunShell(v8::Handle context) { - fprintf(stderr, "V8 version %s [sample shell]\n", v8::V8::GetVersion()); + printf("V8 version %s [sample shell]\n", v8::V8::GetVersion()); static const int kBufferSize = 256; // Enter the execution environment before evaluating any code. 
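"Entering" makes the context the one scripts are compiled and run in until the matching exit; the RAII `Context::Scope` shown in the header earlier pairs the two automatically. Reduced to its essentials (3.14-era, pre-isolate API; error checks elided), the pattern the shell applies on the next line looks like this:

```cpp
#include <v8.h>

// Enter a context, evaluate a script, leave, clean up.
int EvaluateOnePlusTwo() {
  v8::HandleScope handle_scope;
  v8::Persistent<v8::Context> context = v8::Context::New();
  int result;
  {
    v8::Context::Scope context_scope(context);  // Enter() now, Exit() at '}'
    v8::Handle<v8::Script> script =
        v8::Script::Compile(v8::String::New("1 + 2"));
    result = script->Run()->Int32Value();
  }
  context.Dispose();  // persistent handles must be disposed explicitly
  return result;
}
```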
v8::Context::Scope context_scope(context); v8::Local name(v8::String::New("(shell)")); while (true) { char buffer[kBufferSize]; - fprintf(stderr, "> "); + printf("> "); char* str = fgets(buffer, kBufferSize, stdin); if (str == NULL) break; v8::HandleScope handle_scope; ExecuteString(v8::String::New(str), name, true, true); } - fprintf(stderr, "\n"); + printf("\n"); } @@ -311,31 +310,31 @@ void ReportException(v8::TryCatch* try_catch) { if (message.IsEmpty()) { // V8 didn't provide any extra information about this error; just // print the exception. - fprintf(stderr, "%s\n", exception_string); + printf("%s\n", exception_string); } else { // Print (filename):(line number): (message). v8::String::Utf8Value filename(message->GetScriptResourceName()); const char* filename_string = ToCString(filename); int linenum = message->GetLineNumber(); - fprintf(stderr, "%s:%i: %s\n", filename_string, linenum, exception_string); + printf("%s:%i: %s\n", filename_string, linenum, exception_string); // Print line of source code. v8::String::Utf8Value sourceline(message->GetSourceLine()); const char* sourceline_string = ToCString(sourceline); - fprintf(stderr, "%s\n", sourceline_string); + printf("%s\n", sourceline_string); // Print wavy underline (GetUnderline is deprecated). int start = message->GetStartColumn(); for (int i = 0; i < start; i++) { - fprintf(stderr, " "); + printf(" "); } int end = message->GetEndColumn(); for (int i = start; i < end; i++) { - fprintf(stderr, "^"); + printf("^"); } - fprintf(stderr, "\n"); + printf("\n"); v8::String::Utf8Value stack_trace(try_catch->StackTrace()); if (stack_trace.length() > 0) { const char* stack_trace_string = ToCString(stack_trace); - fprintf(stderr, "%s\n", stack_trace_string); + printf("%s\n", stack_trace_string); } } } diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc index efcaf8f..1bc9221 100644 --- a/deps/v8/src/accessors.cc +++ b/deps/v8/src/accessors.cc @@ -112,7 +112,7 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) { HandleScope scope(isolate); // Protect raw pointers. - Handle array_handle(JSArray::cast(object), isolate); + Handle object_handle(object, isolate); Handle value_handle(value, isolate); bool has_exception; @@ -122,7 +122,7 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) { if (has_exception) return Failure::Exception(); if (uint32_v->Number() == number_v->Number()) { - return array_handle->SetElementsLength(*uint32_v); + return Handle::cast(object_handle)->SetElementsLength(*uint32_v); } return isolate->Throw( *isolate->factory()->NewRangeError("invalid_array_length", @@ -465,46 +465,24 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) { MaybeObject* Accessors::FunctionSetPrototype(JSObject* object, - Object* value_raw, + Object* value, void*) { - Isolate* isolate = object->GetIsolate(); - Heap* heap = isolate->heap(); - JSFunction* function_raw = FindInstanceOf(object); - if (function_raw == NULL) return heap->undefined_value(); - if (!function_raw->should_have_prototype()) { + Heap* heap = object->GetHeap(); + JSFunction* function = FindInstanceOf(object); + if (function == NULL) return heap->undefined_value(); + if (!function->should_have_prototype()) { // Since we hit this accessor, object will have no prototype property. 
return object->SetLocalPropertyIgnoreAttributes(heap->prototype_symbol(), - value_raw, + value, NONE); } - HandleScope scope(isolate); - Handle function(function_raw, isolate); - Handle value(value_raw, isolate); - - Handle old_value; - bool is_observed = - FLAG_harmony_observation && - *function == object && - function->map()->is_observed(); - if (is_observed) { - if (function->has_prototype()) - old_value = handle(function->prototype(), isolate); - else - old_value = isolate->factory()->NewFunctionPrototype(function); - } - - Handle result; - MaybeObject* maybe_result = function->SetPrototype(*value); - if (!maybe_result->ToHandle(&result, isolate)) return maybe_result; - ASSERT(function->prototype() == *value); - - if (is_observed && !old_value->SameValue(*value)) { - JSObject::EnqueueChangeRecord( - function, "updated", isolate->factory()->prototype_symbol(), old_value); + Object* prototype; + { MaybeObject* maybe_prototype = function->SetPrototype(value); + if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype; } - - return *function; + ASSERT(function->prototype() == value); + return function; } @@ -671,6 +649,19 @@ const AccessorDescriptor Accessors::FunctionArguments = { // +static MaybeObject* CheckNonStrictCallerOrThrow( + Isolate* isolate, + JSFunction* caller) { + DisableAssertNoAllocation enable_allocation; + if (!caller->shared()->is_classic_mode()) { + return isolate->Throw( + *isolate->factory()->NewTypeError("strict_caller", + HandleVector(NULL, 0))); + } + return caller; +} + + class FrameFunctionIterator { public: FrameFunctionIterator(Isolate* isolate, const AssertNoAllocation& promise) @@ -757,14 +748,7 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) { if (caller->shared()->bound()) { return isolate->heap()->null_value(); } - // Censor if the caller is not a classic mode function. - // Change from ES5, which used to throw, see: - // https://bugs.ecmascript.org/show_bug.cgi?id=310 - if (!caller->shared()->is_classic_mode()) { - return isolate->heap()->null_value(); - } - - return caller; + return CheckNonStrictCallerOrThrow(isolate, caller); } @@ -780,7 +764,7 @@ const AccessorDescriptor Accessors::FunctionCaller = { // -static inline Object* GetPrototypeSkipHiddenPrototypes(Object* receiver) { +MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) { Object* current = receiver->GetPrototype(); while (current->IsJSObject() && JSObject::cast(current)->map()->is_hidden_prototype()) { @@ -790,36 +774,12 @@ static inline Object* GetPrototypeSkipHiddenPrototypes(Object* receiver) { } -MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) { - return GetPrototypeSkipHiddenPrototypes(receiver); -} - - -MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver_raw, - Object* value_raw, +MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver, + Object* value, void*) { - const bool kSkipHiddenPrototypes = true; + const bool skip_hidden_prototypes = true; // To be consistent with other Set functions, return the value. 
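Both accessors lean on the same chain-walking idea; the getter a few lines up skips prototypes whose map is flagged as hidden. Boiled down to plain data structures (toy types standing in for V8's, not the real ones), the walk is just:

```cpp
#include <cstddef>

// Toy model of the hidden-prototype walk in ObjectGetPrototype above.
struct ToyObject {
  bool hidden_prototype;  // stands in for map()->is_hidden_prototype()
  ToyObject* prototype;   // NULL terminates the chain
};

// Return the first prototype not marked hidden, as the accessor's loop does.
ToyObject* GetVisiblePrototype(ToyObject* receiver) {
  ToyObject* current = receiver->prototype;
  while (current != NULL && current->hidden_prototype) {
    current = current->prototype;
  }
  return current;
}
```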
- if (!(FLAG_harmony_observation && receiver_raw->map()->is_observed())) - return receiver_raw->SetPrototype(value_raw, kSkipHiddenPrototypes); - - Isolate* isolate = receiver_raw->GetIsolate(); - HandleScope scope(isolate); - Handle receiver(receiver_raw); - Handle value(value_raw); - Handle old_value(GetPrototypeSkipHiddenPrototypes(*receiver)); - - MaybeObject* result = receiver->SetPrototype(*value, kSkipHiddenPrototypes); - Handle hresult; - if (!result->ToHandle(&hresult, isolate)) return result; - - Handle new_value(GetPrototypeSkipHiddenPrototypes(*receiver)); - if (!new_value->SameValue(*old_value)) { - JSObject::EnqueueChangeRecord(receiver, "prototype", - isolate->factory()->Proto_symbol(), - old_value); - } - return *hresult; + return receiver->SetPrototype(value, skip_hidden_prototypes); } diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 95e5340..e0ad29b 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -630,16 +630,7 @@ void V8::MakeWeak(i::Object** object, void* parameters, i::Isolate* isolate = i::Isolate::Current(); LOG_API(isolate, "MakeWeak"); isolate->global_handles()->MakeWeak(object, parameters, - callback); -} - - -void V8::MakeWeak(i::Isolate* isolate, i::Object** object, - void* parameters, WeakReferenceCallback callback) { - ASSERT(isolate == i::Isolate::Current()); - LOG_API(isolate, "MakeWeak"); - isolate->global_handles()->MakeWeak(object, parameters, - callback); + callback); } @@ -652,32 +643,11 @@ void V8::ClearWeak(i::Object** obj) { void V8::MarkIndependent(i::Object** object) { i::Isolate* isolate = i::Isolate::Current(); - LOG_API(isolate, "MarkIndependent"); - isolate->global_handles()->MarkIndependent(object); -} - - -void V8::MarkIndependent(i::Isolate* isolate, i::Object** object) { - ASSERT(isolate == i::Isolate::Current()); - LOG_API(isolate, "MarkIndependent"); + LOG_API(isolate, "MakeIndependent"); isolate->global_handles()->MarkIndependent(object); } -void V8::MarkPartiallyDependent(i::Object** object) { - i::Isolate* isolate = i::Isolate::Current(); - LOG_API(isolate, "MarkPartiallyDependent"); - isolate->global_handles()->MarkPartiallyDependent(object); -} - - -void V8::MarkPartiallyDependent(i::Isolate* isolate, i::Object** object) { - ASSERT(isolate == i::Isolate::Current()); - LOG_API(isolate, "MarkPartiallyDependent"); - isolate->global_handles()->MarkPartiallyDependent(object); -} - - bool V8::IsGlobalIndependent(i::Object** obj) { i::Isolate* isolate = i::Isolate::Current(); LOG_API(isolate, "IsGlobalIndependent"); @@ -686,14 +656,6 @@ bool V8::IsGlobalIndependent(i::Object** obj) { } -bool V8::IsGlobalIndependent(i::Isolate* isolate, i::Object** obj) { - ASSERT(isolate == i::Isolate::Current()); - LOG_API(isolate, "IsGlobalIndependent"); - if (!isolate->IsInitialized()) return false; - return i::GlobalHandles::IsIndependent(obj); -} - - bool V8::IsGlobalNearDeath(i::Object** obj) { i::Isolate* isolate = i::Isolate::Current(); LOG_API(isolate, "IsGlobalNearDeath"); @@ -710,14 +672,6 @@ bool V8::IsGlobalWeak(i::Object** obj) { } -bool V8::IsGlobalWeak(i::Isolate* isolate, i::Object** obj) { - ASSERT(isolate == i::Isolate::Current()); - LOG_API(isolate, "IsGlobalWeak"); - if (!isolate->IsInitialized()) return false; - return i::GlobalHandles::IsWeak(obj); -} - - void V8::DisposeGlobal(i::Object** obj) { i::Isolate* isolate = i::Isolate::Current(); LOG_API(isolate, "DisposeGlobal"); @@ -725,14 +679,6 @@ void V8::DisposeGlobal(i::Object** obj) { isolate->global_handles()->Destroy(obj); } - -void 
V8::DisposeGlobal(i::Isolate* isolate, i::Object** obj) { - ASSERT(isolate == i::Isolate::Current()); - LOG_API(isolate, "DisposeGlobal"); - if (!isolate->IsInitialized()) return; - isolate->global_handles()->Destroy(obj); -} - // --- H a n d l e s --- @@ -786,12 +732,6 @@ i::Object** HandleScope::CreateHandle(i::Object* value) { } -i::Object** HandleScope::CreateHandle(i::Isolate* isolate, i::Object* value) { - ASSERT(isolate == i::Isolate::Current()); - return i::HandleScope::CreateHandle(value, isolate); -} - - i::Object** HandleScope::CreateHandle(i::HeapObject* value) { ASSERT(value->IsHeapObject()); return reinterpret_cast( @@ -833,77 +773,33 @@ void Context::Exit() { } -static void* DecodeSmiToAligned(i::Object* value, const char* location) { - ApiCheck(value->IsSmi(), location, "Not a Smi"); - return reinterpret_cast(value); -} - - -static i::Smi* EncodeAlignedAsSmi(void* value, const char* location) { - i::Smi* smi = reinterpret_cast(value); - ApiCheck(smi->IsSmi(), location, "Pointer is not aligned"); - return smi; -} - - -static i::Handle EmbedderDataFor(Context* context, - int index, - bool can_grow, - const char* location) { - i::Handle env = Utils::OpenHandle(context); - bool ok = !IsDeadCheck(env->GetIsolate(), location) && - ApiCheck(env->IsNativeContext(), location, "Not a native context") && - ApiCheck(index >= 0, location, "Negative index"); - if (!ok) return i::Handle(); - i::Handle data(env->embedder_data()); - if (index < data->length()) return data; - if (!can_grow) { - Utils::ReportApiFailure(location, "Index too large"); - return i::Handle(); +void Context::SetData(v8::Handle data) { + i::Handle env = Utils::OpenHandle(this); + i::Isolate* isolate = env->GetIsolate(); + if (IsDeadCheck(isolate, "v8::Context::SetData()")) return; + i::Handle raw_data = Utils::OpenHandle(*data); + ASSERT(env->IsNativeContext()); + if (env->IsNativeContext()) { + env->set_data(*raw_data); } - int new_size = i::Max(index, data->length() << 1) + 1; - data = env->GetIsolate()->factory()->CopySizeFixedArray(data, new_size); - env->set_embedder_data(*data); - return data; } -v8::Local Context::SlowGetEmbedderData(int index) { - const char* location = "v8::Context::GetEmbedderData()"; - i::Handle data = EmbedderDataFor(this, index, false, location); - if (data.is_null()) return Local(); - i::Handle result(data->get(index), data->GetIsolate()); +v8::Local Context::GetData() { + i::Handle env = Utils::OpenHandle(this); + i::Isolate* isolate = env->GetIsolate(); + if (IsDeadCheck(isolate, "v8::Context::GetData()")) { + return Local(); + } + ASSERT(env->IsNativeContext()); + if (!env->IsNativeContext()) { + return Local(); + } + i::Handle result(env->data(), isolate); return Utils::ToLocal(result); } -void Context::SetEmbedderData(int index, v8::Handle value) { - const char* location = "v8::Context::SetEmbedderData()"; - i::Handle data = EmbedderDataFor(this, index, true, location); - if (data.is_null()) return; - i::Handle val = Utils::OpenHandle(*value); - data->set(index, *val); - ASSERT_EQ(*Utils::OpenHandle(*value), - *Utils::OpenHandle(*GetEmbedderData(index))); -} - - -void* Context::SlowGetAlignedPointerFromEmbedderData(int index) { - const char* location = "v8::Context::GetAlignedPointerFromEmbedderData()"; - i::Handle data = EmbedderDataFor(this, index, false, location); - if (data.is_null()) return NULL; - return DecodeSmiToAligned(data->get(index), location); -} - - -void Context::SetAlignedPointerInEmbedderData(int index, void* value) { - const char* location = 
"v8::Context::SetAlignedPointerInEmbedderData()"; - i::Handle data = EmbedderDataFor(this, index, true, location); - data->set(index, EncodeAlignedAsSmi(value, location)); - ASSERT_EQ(value, GetAlignedPointerFromEmbedderData(index)); -} - - i::Object** v8::HandleScope::RawClose(i::Object** value) { if (!ApiCheck(!is_closed_, "v8::HandleScope::Close()", @@ -925,7 +821,7 @@ i::Object** v8::HandleScope::RawClose(i::Object** value) { } // Allocate a new handle on the previous handle block. - i::Handle handle(result, isolate_); + i::Handle handle(result); return handle.location(); } @@ -1260,7 +1156,7 @@ void FunctionTemplate::SetHiddenPrototype(bool value) { void FunctionTemplate::ReadOnlyPrototype() { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::FunctionTemplate::ReadOnlyPrototype()")) { + if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetPrototypeAttributes()")) { return; } ENTER_V8(isolate); @@ -1704,8 +1600,6 @@ Local Script::Run() { ON_BAILOUT(isolate, "v8::Script::Run()", return Local()); LOG_API(isolate, "Script::Run"); ENTER_V8(isolate); - i::Logger::TimerEventScope timer_scope( - isolate, i::Logger::TimerEventScope::v8_execute); i::Object* raw_result = NULL; { i::HandleScope scope(isolate); @@ -2304,7 +2198,7 @@ bool Value::IsExternal() const { if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsExternal()")) { return false; } - return Utils::OpenHandle(this)->IsExternal(); + return Utils::OpenHandle(this)->IsForeign(); } @@ -2378,11 +2272,7 @@ static i::Object* LookupBuiltin(i::Isolate* isolate, static bool CheckConstructor(i::Isolate* isolate, i::Handle obj, const char* class_name) { - i::Object* constr = obj->map()->constructor(); - if (!constr->IsJSFunction()) return false; - i::JSFunction* func = i::JSFunction::cast(constr); - return func->shared()->native() && - constr == LookupBuiltin(isolate, class_name); + return obj->map()->constructor() == LookupBuiltin(isolate, class_name); } @@ -2537,7 +2427,8 @@ Local Value::ToInteger() const { void External::CheckCast(v8::Value* that) { if (IsDeadCheck(i::Isolate::Current(), "v8::External::Cast()")) return; - ApiCheck(Utils::OpenHandle(that)->IsExternal(), + i::Handle obj = Utils::OpenHandle(that); + ApiCheck(obj->IsForeign(), "v8::External::Cast()", "Could not convert to external"); } @@ -2882,7 +2773,6 @@ bool v8::Object::Set(v8::Handle key, v8::Handle value, i::Handle value_obj = Utils::OpenHandle(*value); EXCEPTION_PREAMBLE(isolate); i::Handle obj = i::SetProperty( - isolate, self, key_obj, value_obj, @@ -3437,7 +3327,7 @@ v8::Local v8::Object::GetHiddenValue(v8::Handle key) { i::Handle self = Utils::OpenHandle(this); i::Handle key_obj = Utils::OpenHandle(*key); i::Handle key_symbol = FACTORY->LookupSymbol(key_obj); - i::Handle result(self->GetHiddenProperty(*key_symbol), isolate); + i::Handle result(self->GetHiddenProperty(*key_symbol)); if (result->IsUndefined()) return v8::Local(); return Utils::ToLocal(result); } @@ -3674,8 +3564,6 @@ Local Object::CallAsFunction(v8::Handle recv, return Local()); LOG_API(isolate, "Object::CallAsFunction"); ENTER_V8(isolate); - i::Logger::TimerEventScope timer_scope( - isolate, i::Logger::TimerEventScope::v8_execute); i::HandleScope scope(isolate); i::Handle obj = Utils::OpenHandle(this); i::Handle recv_obj = Utils::OpenHandle(*recv); @@ -3707,8 +3595,6 @@ Local Object::CallAsConstructor(int argc, return Local()); LOG_API(isolate, "Object::CallAsConstructor"); ENTER_V8(isolate); - i::Logger::TimerEventScope timer_scope( - isolate, 
i::Logger::TimerEventScope::v8_execute); i::HandleScope scope(isolate); i::Handle obj = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Handle) == sizeof(i::Object**)); @@ -3751,8 +3637,6 @@ Local Function::NewInstance(int argc, return Local()); LOG_API(isolate, "Function::NewInstance"); ENTER_V8(isolate); - i::Logger::TimerEventScope timer_scope( - isolate, i::Logger::TimerEventScope::v8_execute); HandleScope scope; i::Handle function = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Handle) == sizeof(i::Object**)); @@ -3771,8 +3655,6 @@ Local Function::Call(v8::Handle recv, int argc, ON_BAILOUT(isolate, "v8::Function::Call()", return Local()); LOG_API(isolate, "Function::Call"); ENTER_V8(isolate); - i::Logger::TimerEventScope timer_scope( - isolate, i::Logger::TimerEventScope::v8_execute); i::Object* raw_result = NULL; { i::HandleScope scope(isolate); @@ -3816,9 +3698,8 @@ ScriptOrigin Function::GetScriptOrigin() const { i::Handle func = Utils::OpenHandle(this); if (func->shared()->script()->IsScript()) { i::Handle script(i::Script::cast(func->shared()->script())); - i::Handle scriptName = GetScriptNameOrSourceURL(script); v8::ScriptOrigin origin( - Utils::ToLocal(scriptName), + Utils::ToLocal(i::Handle(script->name())), v8::Integer::New(script->line_offset()->value()), v8::Integer::New(script->column_offset()->value())); return origin; @@ -3881,7 +3762,7 @@ static int RecursivelySerializeToUtf8(i::String* string, int32_t* last_character) { int utf8_bytes = 0; while (true) { - if (string->IsOneByteRepresentation()) { + if (string->IsAsciiRepresentation()) { i::String::WriteToFlat(string, buffer, start, end); *last_character = unibrow::Utf16::kNoPreviousCharacter; return utf8_bytes + end - start; @@ -3981,7 +3862,7 @@ int String::WriteUtf8(char* buffer, FlattenString(str); // Flatten the string for efficiency. } int string_length = str->length(); - if (str->IsOneByteRepresentation()) { + if (str->IsAsciiRepresentation()) { int len; if (capacity == -1) { capacity = str->length() + 1; @@ -4115,7 +3996,7 @@ int String::WriteAscii(char* buffer, FlattenString(str); // Flatten the string for efficiency. } - if (str->IsOneByteRepresentation()) { + if (str->IsAsciiRepresentation()) { // WriteToFlat is faster than using the StringInputBuffer. if (length == -1) length = str->length() + 1; int len = i::Min(length, str->length() - start); @@ -4230,7 +4111,7 @@ void v8::String::VerifyExternalStringResourceBase( expectedEncoding = TWO_BYTE_ENCODING; } else { expected = NULL; - expectedEncoding = str->IsOneByteRepresentation() ? ASCII_ENCODING + expectedEncoding = str->IsAsciiRepresentation() ? 
ASCII_ENCODING : TWO_BYTE_ENCODING; } CHECK_EQ(expected, value); @@ -4310,65 +4191,75 @@ int v8::Object::InternalFieldCount() { } -static bool InternalFieldOK(i::Handle obj, - int index, - const char* location) { - return !IsDeadCheck(obj->GetIsolate(), location) && - ApiCheck(index < obj->GetInternalFieldCount(), - location, - "Internal field out of bounds"); -} - - -Local v8::Object::SlowGetInternalField(int index) { +Local v8::Object::CheckedGetInternalField(int index) { i::Handle obj = Utils::OpenHandle(this); - const char* location = "v8::Object::GetInternalField()"; - if (!InternalFieldOK(obj, index, location)) return Local(); - i::Handle value(obj->GetInternalField(index), obj->GetIsolate()); - return Utils::ToLocal(value); + if (IsDeadCheck(obj->GetIsolate(), "v8::Object::GetInternalField()")) { + return Local(); + } + if (!ApiCheck(index < obj->GetInternalFieldCount(), + "v8::Object::GetInternalField()", + "Reading internal field out of bounds")) { + return Local(); + } + i::Handle value(obj->GetInternalField(index)); + Local result = Utils::ToLocal(value); +#ifdef DEBUG + Local unchecked = UncheckedGetInternalField(index); + ASSERT(unchecked.IsEmpty() || (unchecked == result)); +#endif + return result; } void v8::Object::SetInternalField(int index, v8::Handle value) { i::Handle obj = Utils::OpenHandle(this); - const char* location = "v8::Object::SetInternalField()"; - if (!InternalFieldOK(obj, index, location)) return; + i::Isolate* isolate = obj->GetIsolate(); + if (IsDeadCheck(isolate, "v8::Object::SetInternalField()")) { + return; + } + if (!ApiCheck(index < obj->GetInternalFieldCount(), + "v8::Object::SetInternalField()", + "Writing internal field out of bounds")) { + return; + } + ENTER_V8(isolate); i::Handle val = Utils::OpenHandle(*value); obj->SetInternalField(index, *val); - ASSERT_EQ(value, GetInternalField(index)); } -void* v8::Object::SlowGetAlignedPointerFromInternalField(int index) { - i::Handle obj = Utils::OpenHandle(this); - const char* location = "v8::Object::GetAlignedPointerFromInternalField()"; - if (!InternalFieldOK(obj, index, location)) return NULL; - return DecodeSmiToAligned(obj->GetInternalField(index), location); +static bool CanBeEncodedAsSmi(void* ptr) { + const uintptr_t address = reinterpret_cast(ptr); + return ((address & i::kEncodablePointerMask) == 0); } -void v8::Object::SetAlignedPointerInInternalField(int index, void* value) { - i::Handle obj = Utils::OpenHandle(this); - const char* location = "v8::Object::SetAlignedPointerInInternalField()"; - if (!InternalFieldOK(obj, index, location)) return; - obj->SetInternalField(index, EncodeAlignedAsSmi(value, location)); - ASSERT_EQ(value, GetAlignedPointerFromInternalField(index)); -} - - -static void* ExternalValue(i::Object* obj) { - // Obscure semantics for undefined, but somehow checked in our unit tests... 
- if (obj->IsUndefined()) return NULL; - i::Object* foreign = i::JSObject::cast(obj)->GetInternalField(0); - return i::Foreign::cast(foreign)->foreign_address(); +static i::Smi* EncodeAsSmi(void* ptr) { + ASSERT(CanBeEncodedAsSmi(ptr)); + const uintptr_t address = reinterpret_cast(ptr); + i::Smi* result = reinterpret_cast(address << i::kPointerToSmiShift); + ASSERT(i::Internals::HasSmiTag(result)); + ASSERT_EQ(result, i::Smi::FromInt(result->value())); + ASSERT_EQ(ptr, i::Internals::GetExternalPointerFromSmi(result)); + return result; } -void* Object::GetPointerFromInternalField(int index) { - i::Handle obj = Utils::OpenHandle(this); - const char* location = "v8::Object::GetPointerFromInternalField()"; - if (!InternalFieldOK(obj, index, location)) return NULL; - return ExternalValue(obj->GetInternalField(index)); +void v8::Object::SetPointerInInternalField(int index, void* value) { + i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + ENTER_V8(isolate); + if (CanBeEncodedAsSmi(value)) { + Utils::OpenHandle(this)->SetInternalField(index, EncodeAsSmi(value)); + } else { + HandleScope scope; + i::Handle foreign = + isolate->factory()->NewForeign( + reinterpret_cast(value), i::TENURED); + if (!foreign.is_null()) { + Utils::OpenHandle(this)->SetInternalField(index, *foreign); + } + } + ASSERT_EQ(value, GetPointerFromInternalField(index)); } @@ -4423,7 +4314,6 @@ bool v8::V8::Dispose() { HeapStatistics::HeapStatistics(): total_heap_size_(0), total_heap_size_executable_(0), - total_physical_size_(0), used_heap_size_(0), heap_size_limit_(0) { } @@ -4433,7 +4323,6 @@ void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) { // Isolate is uninitialized thus heap is not configured yet. heap_statistics->set_total_heap_size(0); heap_statistics->set_total_heap_size_executable(0); - heap_statistics->set_total_physical_size(0); heap_statistics->set_used_heap_size(0); heap_statistics->set_heap_size_limit(0); return; @@ -4443,7 +4332,6 @@ void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) { heap_statistics->set_total_heap_size(heap->CommittedMemory()); heap_statistics->set_total_heap_size_executable( heap->CommittedMemoryExecutable()); - heap_statistics->set_total_physical_size(heap->CommittedPhysicalMemory()); heap_statistics->set_used_heap_size(heap->SizeOfObjects()); heap_statistics->set_heap_size_limit(heap->MaxReserved()); } @@ -4680,14 +4568,13 @@ v8::Local Context::GetCalling() { v8::Local Context::Global() { - i::Isolate* isolate = i::Isolate::Current(); - if (IsDeadCheck(isolate, "v8::Context::Global()")) { + if (IsDeadCheck(i::Isolate::Current(), "v8::Context::Global()")) { return Local(); } i::Object** ctx = reinterpret_cast(this); i::Handle context = i::Handle::cast(i::Handle(ctx)); - i::Handle global(context->global_proxy(), isolate); + i::Handle global(context->global_proxy()); return Utils::ToLocal(i::Handle::cast(global)); } @@ -4808,20 +4695,74 @@ bool FunctionTemplate::HasInstance(v8::Handle value) { } -Local v8::External::New(void* value) { - STATIC_ASSERT(sizeof(value) == sizeof(i::Address)); +static Local ExternalNewImpl(void* data) { + return Utils::ToLocal(FACTORY->NewForeign(static_cast(data))); +} + +static void* ExternalValueImpl(i::Handle obj) { + return reinterpret_cast(i::Foreign::cast(*obj)->foreign_address()); +} + + +Local v8::External::Wrap(void* data) { + i::Isolate* isolate = i::Isolate::Current(); + STATIC_ASSERT(sizeof(data) == sizeof(i::Address)); + EnsureInitializedForIsolate(isolate, "v8::External::Wrap()"); + LOG_API(isolate,
"External::Wrap"); + ENTER_V8(isolate); + + v8::Local result = CanBeEncodedAsSmi(data) + ? Utils::ToLocal(i::Handle(EncodeAsSmi(data))) + : v8::Local(ExternalNewImpl(data)); + + ASSERT_EQ(data, Unwrap(result)); + return result; +} + + +void* v8::Object::SlowGetPointerFromInternalField(int index) { + i::Handle obj = Utils::OpenHandle(this); + i::Object* value = obj->GetInternalField(index); + if (value->IsSmi()) { + return i::Internals::GetExternalPointerFromSmi(value); + } else if (value->IsForeign()) { + return reinterpret_cast(i::Foreign::cast(value)->foreign_address()); + } else { + return NULL; + } +} + + +void* v8::External::FullUnwrap(v8::Handle wrapper) { + if (IsDeadCheck(i::Isolate::Current(), "v8::External::Unwrap()")) return 0; + i::Handle obj = Utils::OpenHandle(*wrapper); + void* result; + if (obj->IsSmi()) { + result = i::Internals::GetExternalPointerFromSmi(*obj); + } else if (obj->IsForeign()) { + result = ExternalValueImpl(obj); + } else { + result = NULL; + } + ASSERT_EQ(result, QuickUnwrap(wrapper)); + return result; +} + + +Local v8::External::New(void* data) { + STATIC_ASSERT(sizeof(data) == sizeof(i::Address)); i::Isolate* isolate = i::Isolate::Current(); EnsureInitializedForIsolate(isolate, "v8::External::New()"); LOG_API(isolate, "External::New"); ENTER_V8(isolate); - i::Handle external = isolate->factory()->NewExternal(value); - return Utils::ExternalToLocal(external); + return ExternalNewImpl(data); } void* External::Value() const { - if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return NULL; - return ExternalValue(*Utils::OpenHandle(this)); + if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return 0; + i::Handle obj = Utils::OpenHandle(this); + return ExternalValueImpl(obj); } @@ -5391,6 +5332,13 @@ void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) { SetAddHistogramSampleFunction(callback); } +void V8::EnableSlidingStateWindow() { + i::Isolate* isolate = i::Isolate::Current(); + if (IsDeadCheck(isolate, "v8::V8::EnableSlidingStateWindow()")) return; + isolate->logger()->EnableSlidingStateWindow(); +} + + void V8::SetFailedAccessCheckCallbackFunction( FailedAccessCheckCallback callback) { i::Isolate* isolate = i::Isolate::Current(); @@ -5400,7 +5348,6 @@ void V8::SetFailedAccessCheckCallbackFunction( isolate->SetFailedAccessCheckCallback(callback); } - void V8::AddObjectGroup(Persistent* objects, size_t length, RetainedObjectInfo* info) { @@ -5412,19 +5359,6 @@ void V8::AddObjectGroup(Persistent* objects, } -void V8::AddObjectGroup(Isolate* exportedIsolate, - Persistent* objects, - size_t length, - RetainedObjectInfo* info) { - i::Isolate* isolate = reinterpret_cast(exportedIsolate); - ASSERT(isolate == i::Isolate::Current()); - if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return; - STATIC_ASSERT(sizeof(Persistent) == sizeof(i::Object**)); - isolate->global_handles()->AddObjectGroup( - reinterpret_cast(objects), length, info); -} - - void V8::AddImplicitReferences(Persistent parent, Persistent* children, size_t length) { @@ -6435,8 +6369,7 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle value) { const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle title, HeapSnapshot::Type type, - ActivityControl* control, - ObjectNameResolver* resolver) { + ActivityControl* control) { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapProfiler::TakeSnapshot"); i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull; @@ -6449,7 +6382,7 @@ const HeapSnapshot* 
HeapProfiler::TakeSnapshot(Handle title, } return reinterpret_cast( i::HeapProfiler::TakeSnapshot( - *Utils::OpenHandle(*title), internal_type, control, resolver)); + *Utils::OpenHandle(*title), internal_type, control)); } @@ -6560,7 +6493,6 @@ void Testing::PrepareStressRun(int run) { void Testing::DeoptimizeAll() { - i::HandleScope scope; internal::Deoptimizer::DeoptimizeAll(); } diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h index ca2240b..7197b6c 100644 --- a/deps/v8/src/api.h +++ b/deps/v8/src/api.h @@ -201,6 +201,8 @@ class Utils { v8::internal::Handle obj); static inline Local ToLocal( v8::internal::Handle obj); + static inline Local ToLocal( + v8::internal::Handle obj); static inline Local MessageToLocal( v8::internal::Handle obj); static inline Local StackTraceToLocal( @@ -223,8 +225,6 @@ class Utils { v8::internal::Handle obj); static inline Local ToLocal( v8::internal::Handle obj); - static inline Local ExternalToLocal( - v8::internal::Handle obj); #define DECLARE_OPEN_HANDLE(From, To) \ static inline v8::internal::Handle \ @@ -268,6 +268,7 @@ MAKE_TO_LOCAL(ToLocal, String, String) MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp) MAKE_TO_LOCAL(ToLocal, JSObject, Object) MAKE_TO_LOCAL(ToLocal, JSArray, Array) +MAKE_TO_LOCAL(ToLocal, Foreign, External) MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate) MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate) MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature) @@ -279,7 +280,6 @@ MAKE_TO_LOCAL(StackFrameToLocal, JSObject, StackFrame) MAKE_TO_LOCAL(NumberToLocal, Object, Number) MAKE_TO_LOCAL(IntegerToLocal, Object, Integer) MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32) -MAKE_TO_LOCAL(ExternalToLocal, JSObject, External) #undef MAKE_TO_LOCAL diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h index acd61fe..6268c33 100644 --- a/deps/v8/src/arm/assembler-arm-inl.h +++ b/deps/v8/src/arm/assembler-arm-inl.h @@ -86,7 +86,8 @@ int RelocInfo::target_address_size() { void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) { ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); - Assembler::set_target_address_at(pc_, target); + Assembler::set_target_address_at(pc_, reinterpret_cast
( + reinterpret_cast(target) & ~3)); if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) { Object* target_code = Code::GetCodeFromTargetAddress(target); host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( @@ -165,24 +166,6 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell, } -static const int kNoCodeAgeSequenceLength = 3; - -Code* RelocInfo::code_age_stub() { - ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); - return Code::GetCodeFromTargetAddress( - Memory::Address_at(pc_ + Assembler::kInstrSize * - (kNoCodeAgeSequenceLength - 1))); -} - - -void RelocInfo::set_code_age_stub(Code* stub) { - ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); - Memory::Address_at(pc_ + Assembler::kInstrSize * - (kNoCodeAgeSequenceLength - 1)) = - stub->instruction_start(); -} - - Address RelocInfo::call_address() { // The 2 instructions offset assumes patched debug break slot or return // sequence. @@ -256,8 +239,6 @@ void RelocInfo::Visit(ObjectVisitor* visitor) { visitor->VisitGlobalPropertyCell(this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { visitor->VisitExternalReference(this); - } else if (RelocInfo::IsCodeAgeSequence(mode)) { - visitor->VisitCodeAgeSequence(this); #ifdef ENABLE_DEBUGGER_SUPPORT // TODO(isolates): Get a cached isolate below. } else if (((RelocInfo::IsJSReturn(mode) && @@ -284,8 +265,6 @@ void RelocInfo::Visit(Heap* heap) { StaticVisitor::VisitGlobalPropertyCell(heap, this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { StaticVisitor::VisitExternalReference(this); - } else if (RelocInfo::IsCodeAgeSequence(mode)) { - StaticVisitor::VisitCodeAgeSequence(heap, this); #ifdef ENABLE_DEBUGGER_SUPPORT } else if (heap->isolate()->debug()->has_break_points() && ((RelocInfo::IsJSReturn(mode) && @@ -494,12 +473,14 @@ void Assembler::set_target_pointer_at(Address pc, Address target) { Address Assembler::target_address_at(Address pc) { - return target_pointer_at(pc); + return reinterpret_cast
( + reinterpret_cast(target_pointer_at(pc)) & ~3); } void Assembler::set_target_address_at(Address pc, Address target) { - set_target_pointer_at(pc, target); + set_target_pointer_at(pc, reinterpret_cast
( + reinterpret_cast(target) & ~3)); } diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index 47ea0e2..9be62a4 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -318,11 +318,46 @@ const Instr kLdrStrInstrArgumentMask = 0x0000ffff; const Instr kLdrStrOffsetMask = 0x00000fff; -Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) - : AssemblerBase(isolate, buffer, buffer_size), +// Spare buffer. +static const int kMinimalBufferSize = 4*KB; + + +Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) + : AssemblerBase(arg_isolate), recorded_ast_id_(TypeFeedbackId::None()), - positions_recorder_(this) { - reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_); + positions_recorder_(this), + emit_debug_code_(FLAG_debug_code), + predictable_code_size_(false) { + if (buffer == NULL) { + // Do our own buffer management. + if (buffer_size <= kMinimalBufferSize) { + buffer_size = kMinimalBufferSize; + + if (isolate()->assembler_spare_buffer() != NULL) { + buffer = isolate()->assembler_spare_buffer(); + isolate()->set_assembler_spare_buffer(NULL); + } + } + if (buffer == NULL) { + buffer_ = NewArray(buffer_size); + } else { + buffer_ = static_cast(buffer); + } + buffer_size_ = buffer_size; + own_buffer_ = true; + + } else { + // Use externally provided buffer instead. + ASSERT(buffer_size > 0); + buffer_ = static_cast(buffer); + buffer_size_ = buffer_size; + own_buffer_ = false; + } + + // Set up buffer pointers. + ASSERT(buffer_ != NULL); + pc_ = buffer_; + reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); num_pending_reloc_info_ = 0; next_buffer_check_ = 0; const_pool_blocked_nesting_ = 0; @@ -335,6 +370,14 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) Assembler::~Assembler() { ASSERT(const_pool_blocked_nesting_ == 0); + if (own_buffer_) { + if (isolate()->assembler_spare_buffer() == NULL && + buffer_size_ == kMinimalBufferSize) { + isolate()->set_assembler_spare_buffer(buffer_); + } else { + DeleteArray(buffer_); + } + } } @@ -2349,20 +2392,6 @@ void Assembler::vmul(const DwVfpRegister dst, } -void Assembler::vmla(const DwVfpRegister dst, - const DwVfpRegister src1, - const DwVfpRegister src2, - const Condition cond) { - // Instruction details available in ARM DDI 0406C.b, A8-892. - // cond(31-28) | 11100(27-23) | D=?(22) | 00(21-20) | Vn(19-16) | - // Vd(15-12) | 101(11-9) | sz(8)=1 | N=?(7) | op(6)=0 | M=?(5) | 0(4) | - // Vm(3-0) - unsigned x = (cond | 0x1C*B23 | src1.code()*B16 | - dst.code()*B12 | 0x5*B9 | B8 | src2.code()); - emit(x); -} - - void Assembler::vdiv(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2, @@ -2701,9 +2730,9 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) { b(&after_pool); } - // Put down constant pool marker "Undefined instruction". - emit(kConstantPoolMarker | - EncodeConstantPoolLength(num_pending_reloc_info_)); + // Put down constant pool marker "Undefined instruction" as specified by + // A5.6 (ARMv7) Instruction set encoding. + emit(kConstantPoolMarker | num_pending_reloc_info_); // Emit constant pool entries. for (int i = 0; i < num_pending_reloc_info_; i++) { diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index 3b9bb80..dfcce60 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -647,7 +647,15 @@ class Assembler : public AssemblerBase { // is too small, a fatal error occurs. 
No deallocation of the buffer is done // upon destruction of the assembler. Assembler(Isolate* isolate, void* buffer, int buffer_size); - virtual ~Assembler(); + ~Assembler(); + + // Overrides the default provided by FLAG_debug_code. + void set_emit_debug_code(bool value) { emit_debug_code_ = value; } + + // Avoids using instructions that vary in size in unpredictable ways between + // the snapshot and the running VM. This is needed by the full compiler so + // that it can recompile code with debug support and fix the PC. + void set_predictable_code_size(bool value) { predictable_code_size_ = value; } // GetCode emits any pending (non-emitted) code and fills the descriptor // desc. GetCode() is idempotent; it returns the same result if no other @@ -1126,10 +1134,6 @@ class Assembler : public AssemblerBase { const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond = al); - void vmla(const DwVfpRegister dst, - const DwVfpRegister src1, - const DwVfpRegister src2, - const Condition cond = al); void vdiv(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2, @@ -1181,6 +1185,8 @@ class Assembler : public AssemblerBase { // Jump unconditionally to given label. void jmp(Label* L) { b(L, al); } + bool predictable_code_size() const { return predictable_code_size_; } + static bool use_immediate_embedded_pointer_loads( const Assembler* assembler) { #ifdef USE_BLX @@ -1276,6 +1282,8 @@ class Assembler : public AssemblerBase { void db(uint8_t data); void dd(uint32_t data); + int pc_offset() const { return pc_ - buffer_; } + PositionsRecorder* positions_recorder() { return &positions_recorder_; } // Read/patch instructions @@ -1321,8 +1329,6 @@ class Assembler : public AssemblerBase { // and the accessed constant. static const int kMaxDistToPool = 4*KB; static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize; - STATIC_ASSERT((kConstantPoolLengthMaxMask & kMaxNumPendingRelocInfo) == - kMaxNumPendingRelocInfo); // Postpone the generation of the constant pool for the specified number of // instructions. @@ -1337,6 +1343,8 @@ class Assembler : public AssemblerBase { // the relocation info. TypeFeedbackId recorded_ast_id_; + bool emit_debug_code() const { return emit_debug_code_; } + int buffer_space() const { return reloc_info_writer.pos() - pc_; } // Decode branch instruction at pos and return branch target pos @@ -1378,6 +1386,13 @@ class Assembler : public AssemblerBase { } private: + // Code buffer: + // The buffer into which code and relocation info are generated. + byte* buffer_; + int buffer_size_; + // True if the assembler owns the buffer, false if buffer is external. + bool own_buffer_; + int next_buffer_check_; // pc offset of next buffer check // Code generation @@ -1386,6 +1401,7 @@ class Assembler : public AssemblerBase { // not have to check for overflow. The same is true for writes of large // relocation info entries. 
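The arrangement being protected: machine code grows upward from the start of the buffer while relocation info grows downward from the end, and `kGap` (defined just below) keeps a cushion between them so short emit sequences can skip per-instruction overflow checks. A toy model of the invariant (names ours, not V8's):

```cpp
#include <cassert>

// Toy model of a shared code/reloc-info buffer: code appends at pc (grows
// up), relocation info prepends at reloc_pos (grows down), and a fixed gap
// lets small emits run between explicit overflow checks.
struct ToyBuffer {
  static const int kGap = 32;
  char* pc;         // next code byte
  char* reloc_pos;  // last relocation byte written

  int free_space() const { return static_cast<int>(reloc_pos - pc); }
  bool needs_grow() const { return free_space() <= kGap; }

  void emit_code_byte(char b) {
    assert(!needs_grow());  // caller grows the buffer before emitting
    *pc++ = b;
  }
  void emit_reloc_byte(char b) { *--reloc_pos = b; }
};
```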
static const int kGap = 32; + byte* pc_; // the program counter; moves forward // Constant pool generation // Pools are emitted in the instruction stream, preferably after unconditional @@ -1479,6 +1495,10 @@ class Assembler : public AssemblerBase { friend class BlockConstPoolScope; PositionsRecorder positions_recorder_; + + bool emit_debug_code_; + bool predictable_code_size_; + friend class PositionsRecorder; friend class EnsureSpace; }; @@ -1492,6 +1512,26 @@ class EnsureSpace BASE_EMBEDDED { }; +class PredictableCodeSizeScope { + public: + explicit PredictableCodeSizeScope(Assembler* assembler) + : asm_(assembler) { + old_value_ = assembler->predictable_code_size(); + assembler->set_predictable_code_size(true); + } + + ~PredictableCodeSizeScope() { + if (!old_value_) { + asm_->set_predictable_code_size(false); + } + } + + private: + Assembler* asm_; + bool old_value_; +}; + + } } // namespace v8::internal #endif // V8_ARM_ASSEMBLER_ARM_H_ diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index 24d14e8..2d1d7b1 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -1226,39 +1226,6 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { } -static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { - // For now, we are relying on the fact that make_code_young doesn't do any - // garbage collection which allows us to save/restore the registers without - // worrying about which of them contain pointers. We also don't build an - // internal frame to make the code faster, since we shouldn't have to do stack - // crawls in MakeCodeYoung. This seems a bit fragile. - - // The following registers must be saved and restored when calling through to - // the runtime: - // r0 - contains return address (beginning of patch sequence) - // r1 - function object - FrameScope scope(masm, StackFrame::MANUAL); - __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit()); - __ PrepareCallCFunction(1, 0, r1); - __ CallCFunction( - ExternalReference::get_make_code_young_function(masm->isolate()), 1); - __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit()); - __ mov(pc, r0); -} - -#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \ -void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \ - MacroAssembler* masm) { \ - GenerateMakeCodeYoungAgainCommon(masm); \ -} \ -void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \ - MacroAssembler* masm) { \ - GenerateMakeCodeYoungAgainCommon(masm); \ -} -CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR) -#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR - - static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, Deoptimizer::BailoutType type) { { diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index 9484f85..ceb108f 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -41,7 +41,8 @@ namespace internal { static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow, - Condition cond); + Condition cond, + bool never_nan_nan); static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs, Register rhs, @@ -626,6 +627,24 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm, } +void FloatingPointHelper::LoadOperands( + MacroAssembler* masm, + FloatingPointHelper::Destination destination, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* slow) { + + // Load right operand (r0) to d7 or r2/r3.
+ LoadNumber(masm, destination, + r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow); + + // Load left operand (r1) to d6 or r0/r1. + LoadNumber(masm, destination, + r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow); +} + + void FloatingPointHelper::LoadNumber(MacroAssembler* masm, Destination destination, Register object, @@ -729,13 +748,13 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, Register int_scratch, Destination destination, DwVfpRegister double_dst, - Register dst_mantissa, - Register dst_exponent, + Register dst1, + Register dst2, Register scratch2, SwVfpRegister single_scratch) { ASSERT(!int_scratch.is(scratch2)); - ASSERT(!int_scratch.is(dst_mantissa)); - ASSERT(!int_scratch.is(dst_exponent)); + ASSERT(!int_scratch.is(dst1)); + ASSERT(!int_scratch.is(dst2)); Label done; @@ -744,57 +763,56 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, __ vmov(single_scratch, int_scratch); __ vcvt_f64_s32(double_dst, single_scratch); if (destination == kCoreRegisters) { - __ vmov(dst_mantissa, dst_exponent, double_dst); + __ vmov(dst1, dst2, double_dst); } } else { Label fewer_than_20_useful_bits; // Expected output: - // | dst_exponent | dst_mantissa | + // | dst2 | dst1 | // | s | exp | mantissa | // Check for zero. __ cmp(int_scratch, Operand::Zero()); - __ mov(dst_exponent, int_scratch); - __ mov(dst_mantissa, int_scratch); + __ mov(dst2, int_scratch); + __ mov(dst1, int_scratch); __ b(eq, &done); // Preload the sign of the value. - __ and_(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask), SetCC); + __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC); // Get the absolute value of the object (as an unsigned integer). __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi); // Get mantissa[51:20]. // Get the position of the first set bit. - __ CountLeadingZeros(dst_mantissa, int_scratch, scratch2); - __ rsb(dst_mantissa, dst_mantissa, Operand(31)); + __ CountLeadingZeros(dst1, int_scratch, scratch2); + __ rsb(dst1, dst1, Operand(31)); // Set the exponent. - __ add(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias)); - __ Bfi(dst_exponent, scratch2, scratch2, + __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias)); + __ Bfi(dst2, scratch2, scratch2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); // Clear the first non null bit. __ mov(scratch2, Operand(1)); - __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst_mantissa)); + __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1)); - __ cmp(dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord)); + __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord)); // Get the number of bits to set in the lower part of the mantissa. - __ sub(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord), - SetCC); + __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); __ b(mi, &fewer_than_20_useful_bits); // Set the higher 20 bits of the mantissa.
- __ orr(dst_exponent, dst_exponent, Operand(int_scratch, LSR, scratch2)); + __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2)); __ rsb(scratch2, scratch2, Operand(32)); - __ mov(dst_mantissa, Operand(int_scratch, LSL, scratch2)); + __ mov(dst1, Operand(int_scratch, LSL, scratch2)); __ b(&done); __ bind(&fewer_than_20_useful_bits); - __ rsb(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord)); + __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord)); __ mov(scratch2, Operand(int_scratch, LSL, scratch2)); - __ orr(dst_exponent, dst_exponent, scratch2); + __ orr(dst2, dst2, scratch2); // Set dst1 to 0. - __ mov(dst_mantissa, Operand::Zero()); + __ mov(dst1, Operand::Zero()); } __ bind(&done); } @@ -805,8 +823,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, Destination destination, DwVfpRegister double_dst, DwVfpRegister double_scratch, - Register dst_mantissa, - Register dst_exponent, + Register dst1, + Register dst2, Register heap_number_map, Register scratch1, Register scratch2, @@ -822,8 +840,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, __ JumpIfNotSmi(object, &obj_is_not_smi); __ SmiUntag(scratch1, object); - ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa, - dst_exponent, scratch2, single_scratch); + ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2, + scratch2, single_scratch); __ b(&done); __ bind(&obj_is_not_smi); @@ -850,52 +868,26 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, __ b(ne, not_int32); if (destination == kCoreRegisters) { - __ vmov(dst_mantissa, dst_exponent, double_dst); + __ vmov(dst1, dst2, double_dst); } } else { ASSERT(!scratch1.is(object) && !scratch2.is(object)); - // Load the double value in the destination registers. - bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent); - if (save_registers) { - // Save both output registers, because the other one probably holds - // an important value too. - __ Push(dst_exponent, dst_mantissa); - } - __ Ldrd(dst_mantissa, dst_exponent, - FieldMemOperand(object, HeapNumber::kValueOffset)); + // Load the double value in the destination registers. + __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); // Check for 0 and -0. - Label zero; - __ bic(scratch1, dst_exponent, Operand(HeapNumber::kSignMask)); - __ orr(scratch1, scratch1, Operand(dst_mantissa)); + __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask)); + __ orr(scratch1, scratch1, Operand(dst2)); __ cmp(scratch1, Operand::Zero()); - __ b(eq, &zero); + __ b(eq, &done); // Check that the value can be exactly represented by a 32-bit integer. // Jump to not_int32 if that's not the case. - Label restore_input_and_miss; - DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2, - &restore_input_and_miss); - - // dst_* were trashed. Reload the double value. - if (save_registers) { - __ Pop(dst_exponent, dst_mantissa); - } - __ Ldrd(dst_mantissa, dst_exponent, - FieldMemOperand(object, HeapNumber::kValueOffset)); - __ b(&done); - - __ bind(&restore_input_and_miss); - if (save_registers) { - __ Pop(dst_exponent, dst_mantissa); - } - __ b(not_int32); + DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32); - __ bind(&zero); - if (save_registers) { - __ Drop(2); - } + // dst1 and dst2 were trashed. Reload the double value.
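// The ConvertIntToDouble path above builds an IEEE-754 double by hand: sign
// bit, biased exponent from the position of the leading set bit, and the
// mantissa split across the top 20 and bottom 32 bits of the two words. A
// host-side C++ sketch of the same computation (not part of the patch;
// __builtin_clz is the GCC/Clang stand-in for CountLeadingZeros):
#include <cstdint>
#include <cstring>
#include <cstdio>

static void IntToDoubleWords(int32_t v, uint32_t* hi, uint32_t* lo) {
  *hi = 0; *lo = 0;
  if (v == 0) return;                       // +0.0 encodes as all-zero bits
  uint32_t sign = 0;
  uint32_t mag = static_cast<uint32_t>(v);
  if (v < 0) { sign = 0x80000000u; mag = 0u - mag; }
  int top = 31 - __builtin_clz(mag);        // position of the leading 1 bit
  uint32_t exp = static_cast<uint32_t>(top) + 1023;  // IEEE-754 exponent bias
  mag &= ~(1u << top);                      // clear the implicit leading 1
  if (top >= 20) {                          // bits spill into the low word
    *hi = sign | (exp << 20) | (mag >> (top - 20));
    *lo = (top == 20) ? 0 : (mag << (32 - (top - 20)));
  } else {                                  // fewer than 20 useful bits
    *hi = sign | (exp << 20) | (mag << (20 - top));
    *lo = 0;
  }
}

int main() {
  for (int32_t v : {1, -1, 42, -42, 2147483647}) {
    uint32_t hi, lo;
    IntToDoubleWords(v, &hi, &lo);
    double d = static_cast<double>(v);
    uint64_t bits; std::memcpy(&bits, &d, sizeof bits);
    std::printf("%11d -> %08x:%08x (compiler: %08x:%08x)\n", v, hi, lo,
                static_cast<uint32_t>(bits >> 32), static_cast<uint32_t>(bits));
  }
}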
+ __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); } __ bind(&done); @@ -918,15 +910,14 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, !scratch1.is(scratch3) && !scratch2.is(scratch3)); - Label done, maybe_undefined; + Label done; __ UntagAndJumpIfSmi(dst, object, &done); __ AssertRootValue(heap_number_map, Heap::kHeapNumberMapRootIndex, "HeapNumberMap register clobbered."); - - __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined); + __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); // Object is a heap number. // Convert the floating point value to a 32-bit integer. @@ -973,28 +964,20 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, __ tst(scratch1, Operand(HeapNumber::kSignMask)); __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi); } - __ b(&done); - - __ bind(&maybe_undefined); - __ CompareRoot(object, Heap::kUndefinedValueRootIndex); - __ b(ne, not_int32); - // |undefined| is truncated to 0. - __ mov(dst, Operand(Smi::FromInt(0))); - // Fall through. __ bind(&done); } void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, - Register src_exponent, - Register src_mantissa, + Register src1, + Register src2, Register dst, Register scratch, Label* not_int32) { // Get exponent alone in scratch. __ Ubfx(scratch, - src_exponent, + src1, HeapNumber::kExponentShift, HeapNumber::kExponentBits); @@ -1014,11 +997,11 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, // Another way to put it is that if (exponent - signbit) > 30 then the // number cannot be represented as an int32. Register tmp = dst; - __ sub(tmp, scratch, Operand(src_exponent, LSR, 31)); + __ sub(tmp, scratch, Operand(src1, LSR, 31)); __ cmp(tmp, Operand(30)); __ b(gt, not_int32); // - Bits [21:0] in the mantissa are not null. - __ tst(src_mantissa, Operand(0x3fffff)); + __ tst(src2, Operand(0x3fffff)); __ b(ne, not_int32); // Otherwise the exponent needs to be big enough to shift left all the @@ -1029,19 +1012,19 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, // Get the 32 higher bits of the mantissa in dst. __ Ubfx(dst, - src_mantissa, + src2, HeapNumber::kMantissaBitsInTopWord, 32 - HeapNumber::kMantissaBitsInTopWord); __ orr(dst, dst, - Operand(src_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord)); + Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord)); // Create the mask and test the lower bits (of the higher bits). __ rsb(scratch, scratch, Operand(32)); - __ mov(src_mantissa, Operand(1)); - __ mov(src_exponent, Operand(src_mantissa, LSL, scratch)); - __ sub(src_exponent, src_exponent, Operand(1)); - __ tst(dst, src_exponent); + __ mov(src2, Operand(1)); + __ mov(src1, Operand(src2, LSL, scratch)); + __ sub(src1, src1, Operand(1)); + __ tst(dst, src1); __ b(ne, not_int32); } @@ -1165,43 +1148,48 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { // for "identity and not NaN". static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow, - Condition cond) { + Condition cond, + bool never_nan_nan) { Label not_identical; Label heap_number, return_equal; __ cmp(r0, r1); __ b(ne, &not_identical); - // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(), - // so we do the second best thing - test it ourselves. - // They are both equal and they are not both Smis so both of them are not - // Smis. If it's not a heap number, then return equal.
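// The DoubleIs32BitInteger test above works purely on the two raw words of
// the double: the unbiased exponent must be small enough, and every mantissa
// bit that would fall below the binary point must be zero. A host-side
// sketch of the same decision (not v8 code; arithmetic on the extracted
// bits stands in for the Ubfx/tst sequence):
#include <cstdint>
#include <cstring>

static bool DoubleIs32BitInteger(double d) {
  uint64_t bits; std::memcpy(&bits, &d, sizeof bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  uint32_t lo = static_cast<uint32_t>(bits);
  if (d == 0.0) return true;                 // the stub short-circuits +/-0
  int exp = static_cast<int>((hi >> 20) & 0x7FF) - 1023;
  uint32_t sign = hi >> 31;
  // (exponent - signbit) > 30: needs more than 31 value bits, so not int32.
  if (exp - static_cast<int>(sign) > 30) return false;
  if (exp < 0) return false;                 // magnitude below 1: fractional
  // All mantissa bits below the binary point must be zero.
  uint64_t mantissa = (static_cast<uint64_t>(hi & 0xFFFFF) << 32) | lo;
  int shifted_out = 52 - exp;
  return (mantissa & ((uint64_t{1} << shifted_out) - 1)) == 0;
}
// Usage: DoubleIs32BitInteger(-2147483648.0) is true; 2147483648.0 and 1.5
// are false, matching the jumps to not_int32 in the stub.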
- if (cond == lt || cond == gt) { - __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE); - __ b(ge, slow); - } else { - __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); - __ b(eq, &heap_number); - // Comparing JS objects with <=, >= is complicated. - if (cond != eq) { - __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE)); + // The two objects are identical. If we know that one of them isn't NaN then + // we now know they test equal. + if (cond != eq || !never_nan_nan) { + // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(), + // so we do the second best thing - test it ourselves. + // They are both equal and they are not both Smis so both of them are not + // Smis. If it's not a heap number, then return equal. + if (cond == lt || cond == gt) { + __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE); __ b(ge, slow); - // Normally here we fall through to return_equal, but undefined is - // special: (undefined == undefined) == true, but - // (undefined <= undefined) == false! See ECMAScript 11.8.5. - if (cond == le || cond == ge) { - __ cmp(r4, Operand(ODDBALL_TYPE)); - __ b(ne, &return_equal); - __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); - __ cmp(r0, r2); - __ b(ne, &return_equal); - if (cond == le) { - // undefined <= undefined should fail. - __ mov(r0, Operand(GREATER)); - } else { - // undefined >= undefined should fail. - __ mov(r0, Operand(LESS)); + } else { + __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); + __ b(eq, &heap_number); + // Comparing JS objects with <=, >= is complicated. + if (cond != eq) { + __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE)); + __ b(ge, slow); + // Normally here we fall through to return_equal, but undefined is + // special: (undefined == undefined) == true, but + // (undefined <= undefined) == false! See ECMAScript 11.8.5. + if (cond == le || cond == ge) { + __ cmp(r4, Operand(ODDBALL_TYPE)); + __ b(ne, &return_equal); + __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); + __ cmp(r0, r2); + __ b(ne, &return_equal); + if (cond == le) { + // undefined <= undefined should fail. + __ mov(r0, Operand(GREATER)); + } else { + // undefined >= undefined should fail. + __ mov(r0, Operand(LESS)); + } + __ Ret(); } - __ Ret(); } } } @@ -1216,45 +1204,47 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, } __ Ret(); - // For less and greater we don't have to check for NaN since the result of - // x < x is false regardless. For the others here is some code to check - // for NaN. - if (cond != lt && cond != gt) { - __ bind(&heap_number); - // It is a heap number, so return non-equal if it's NaN and equal if it's - // not NaN. - - // The representation of NaN values has all exponent bits (52..62) set, - // and not all mantissa bits (0..51) clear. - // Read top bits of double representation (second word of value). - __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); - // Test that exponent bits are all set. - __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); - // NaNs have all-one exponents so they sign extend to -1. - __ cmp(r3, Operand(-1)); - __ b(ne, &return_equal); - - // Shift out flag and all exponent bits, retaining only mantissa. - __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); - // Or with all low-bits of mantissa. 
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); - __ orr(r0, r3, Operand(r2), SetCC); - // For equal we already have the right value in r0: Return zero (equal) - // if all bits in mantissa are zero (it's an Infinity) and non-zero if - // not (it's a NaN). For <= and >= we need to load r0 with the failing - // value if it's a NaN. - if (cond != eq) { - // All-zero means Infinity means equal. - __ Ret(eq); - if (cond == le) { - __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. - } else { - __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. + if (cond != eq || !never_nan_nan) { + // For less and greater we don't have to check for NaN since the result of + // x < x is false regardless. For the others here is some code to check + // for NaN. + if (cond != lt && cond != gt) { + __ bind(&heap_number); + // It is a heap number, so return non-equal if it's NaN and equal if it's + // not NaN. + + // The representation of NaN values has all exponent bits (52..62) set, + // and not all mantissa bits (0..51) clear. + // Read top bits of double representation (second word of value). + __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + // Test that exponent bits are all set. + __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); + // NaNs have all-one exponents so they sign extend to -1. + __ cmp(r3, Operand(-1)); + __ b(ne, &return_equal); + + // Shift out flag and all exponent bits, retaining only mantissa. + __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); + // Or with all low-bits of mantissa. + __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); + __ orr(r0, r3, Operand(r2), SetCC); + // For equal we already have the right value in r0: Return zero (equal) + // if all bits in mantissa are zero (it's an Infinity) and non-zero if + // not (it's a NaN). For <= and >= we need to load r0 with the failing + // value if it's a NaN. + if (cond != eq) { + // All-zero means Infinity means equal. + __ Ret(eq); + if (cond == le) { + __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. + } else { + __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. + } } + __ Ret(); } - __ Ret(); + // No fall through here. } - // No fall through here. __ bind(&not_identical); } @@ -1688,60 +1678,42 @@ void NumberToStringStub::Generate(MacroAssembler* masm) { } -static void ICCompareStub_CheckInputType(MacroAssembler* masm, - Register input, - Register scratch, - CompareIC::State expected, - Label* fail) { - Label ok; - if (expected == CompareIC::SMI) { - __ JumpIfNotSmi(input, fail); - } else if (expected == CompareIC::HEAP_NUMBER) { - __ JumpIfSmi(input, &ok); - __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail, - DONT_DO_SMI_CHECK); - } - // We could be strict about symbol/string here, but as long as - // hydrogen doesn't care, the stub doesn't have to care either. - __ bind(&ok); -} - - -// On entry r1 and r2 are the values to be compared. +// On entry lhs_ and rhs_ are the values to be compared. // On exit r0 is 0, positive or negative to indicate the result of // the comparison. -void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { - Register lhs = r1; - Register rhs = r0; - Condition cc = GetCondition(); - - Label miss; - ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss); - ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss); +void CompareStub::Generate(MacroAssembler* masm) { + ASSERT((lhs_.is(r0) && rhs_.is(r1)) || + (lhs_.is(r1) && rhs_.is(r0))); Label slow; // Call builtin.
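// The NaN test reverted above never compares against a NaN constant: it
// checks that all eleven exponent bits are set and that at least one
// mantissa bit is set (an all-zero mantissa would be an infinity). The same
// test on the raw words in host C++ (a sketch, not v8 code; the arithmetic
// right shift of a negative int is assumed to sign-extend, as on GCC/Clang):
#include <cstdint>
#include <cstring>

static bool IsNaNBits(double d) {
  uint64_t bits; std::memcpy(&bits, &d, sizeof bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  uint32_t lo = static_cast<uint32_t>(bits);
  // Sign-extend the 11 exponent bits, as the Sbfx does: NaNs and infinities
  // both give -1 here.
  int32_t exp = static_cast<int32_t>(hi << 1) >> 21;
  if (exp != -1) return false;
  // Shift out sign and exponent, keep the 20 high mantissa bits, then OR in
  // the 32 low bits -- the stub's mov/orr pair. Nonzero means NaN.
  return ((hi << 12) | lo) != 0;
}
// IsNaNBits(0.0 / 0.0) is true; IsNaNBits(1.0 / 0.0) (infinity) is false.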
Label not_smis, both_loaded_as_doubles, lhs_not_nan; - Label not_two_smis, smi_done; - __ orr(r2, r1, r0); - __ JumpIfNotSmi(r2, &not_two_smis); - __ mov(r1, Operand(r1, ASR, 1)); - __ sub(r0, r1, Operand(r0, ASR, 1)); - __ Ret(); - __ bind(&not_two_smis); + if (include_smi_compare_) { + Label not_two_smis, smi_done; + __ orr(r2, r1, r0); + __ JumpIfNotSmi(r2, &not_two_smis); + __ mov(r1, Operand(r1, ASR, 1)); + __ sub(r0, r1, Operand(r0, ASR, 1)); + __ Ret(); + __ bind(&not_two_smis); + } else if (FLAG_debug_code) { + __ orr(r2, r1, r0); + __ tst(r2, Operand(kSmiTagMask)); + __ Assert(ne, "CompareStub: unexpected smi operands."); + } // NOTICE! This code is only reached after a smi-fast-case check, so // it is certain that at least one operand isn't a smi. // Handle the case where the objects are identical. Either returns the answer // or goes to slow. Only falls through if the objects were not identical. - EmitIdenticalObjectComparison(masm, &slow, cc); + EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); // If either is a Smi (we know that not both are), then they can only // be strictly equal if the other is a HeapNumber. STATIC_ASSERT(kSmiTag == 0); ASSERT_EQ(0, Smi::FromInt(0)); - __ and_(r2, lhs, Operand(rhs)); + __ and_(r2, lhs_, Operand(rhs_)); __ JumpIfNotSmi(r2, &not_smis); // One operand is a smi. EmitSmiNonsmiComparison generates code that can: // 1) Return the answer. @@ -1752,7 +1724,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { // comparison. If VFP3 is supported the double values of the numbers have // been loaded into d7 and d6. Otherwise, the double values have been loaded // into r0, r1, r2, and r3. - EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict()); + EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_); __ bind(&both_loaded_as_doubles); // The arguments have been converted to doubles and stored in d6 and d7, if @@ -1775,7 +1747,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { // If one of the sides was a NaN then the v flag is set. Load r0 with // whatever it takes to make the comparison fail, since comparisons with NaN // always fail. - if (cc == lt || cc == le) { + if (cc_ == lt || cc_ == le) { __ mov(r0, Operand(GREATER)); } else { __ mov(r0, Operand(LESS)); @@ -1784,19 +1756,19 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { } else { // Checks for NaN in the doubles we have loaded. Can return the answer or // fall through if neither is a NaN. Also binds lhs_not_nan. - EmitNanCheck(masm, &lhs_not_nan, cc); + EmitNanCheck(masm, &lhs_not_nan, cc_); // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the // answer. Never falls through. - EmitTwoNonNanDoubleComparison(masm, cc); + EmitTwoNonNanDoubleComparison(masm, cc_); } __ bind(&not_smis); // At this point we know we are dealing with two different objects, // and neither of them is a Smi. The objects are in rhs_ and lhs_. - if (strict()) { + if (strict_) { // This returns non-equal for some object types, or falls through if it // was not lucky. - EmitStrictTwoHeapObjectCompare(masm, lhs, rhs); + EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_); } Label check_for_symbols; @@ -1806,8 +1778,8 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { // that case. If the inputs are not doubles then jumps to check_for_symbols. // In this case r2 will contain the type of rhs_. Never falls through.
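// The smi fast path at the top of CompareStub::Generate relies on two tag
// tricks: OR-ing the operands leaves the tag bit set iff either one is not a
// smi, so a single test guards the path; and since smis are payloads shifted
// left by one, ASR #1 untags and a subtract of the untagged values yields a
// result whose sign is the comparison answer. A host-side sketch (not v8
// code; kSmiTagMask mirrors the constant used in the stub):
#include <cstdint>
#include <cstdio>

static const intptr_t kSmiTagMask = 1;

static bool BothSmis(intptr_t lhs, intptr_t rhs) {
  return ((lhs | rhs) & kSmiTagMask) == 0;  // one test covers both operands
}

static intptr_t SmiCompare(intptr_t lhs, intptr_t rhs) {
  return (lhs >> 1) - (rhs >> 1);  // < 0: less, 0: equal, > 0: greater
}

int main() {
  intptr_t three = 3 << 1, seven = 7 << 1;  // tagged smis
  if (BothSmis(three, seven))
    std::printf("compare(3, 7) = %ld\n",
                static_cast<long>(SmiCompare(three, seven)));
}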
EmitCheckForTwoHeapNumbers(masm, - lhs, - rhs, + lhs_, + rhs_, &both_loaded_as_doubles, &check_for_symbols, &flat_string_check); @@ -1815,31 +1787,31 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { __ bind(&check_for_symbols); // In the strict case the EmitStrictTwoHeapObjectCompare already took care of // symbols. - if (cc == eq && !strict()) { + if (cc_ == eq && !strict_) { // Returns an answer for two symbols or two detectable objects. // Otherwise jumps to string case or not both strings case. // Assumes that r2 is the type of rhs_ on entry. - EmitCheckForSymbolsOrObjects(masm, lhs, rhs, &flat_string_check, &slow); + EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow); } // Check for both being sequential ASCII strings, and inline if that is the // case. __ bind(&flat_string_check); - __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow); + __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow); __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3); - if (cc == eq) { + if (cc_ == eq) { StringCompareStub::GenerateFlatAsciiStringEquals(masm, - lhs, - rhs, + lhs_, + rhs_, r2, r3, r4); } else { StringCompareStub::GenerateCompareFlatAsciiStrings(masm, - lhs, - rhs, + lhs_, + rhs_, r2, r3, r4, @@ -1849,18 +1821,18 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { __ bind(&slow); - __ Push(lhs, rhs); + __ Push(lhs_, rhs_); // Figure out which native to call and setup the arguments. Builtins::JavaScript native; - if (cc == eq) { - native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; + if (cc_ == eq) { + native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; } else { native = Builtins::COMPARE; int ncr; // NaN compare result - if (cc == lt || cc == le) { + if (cc_ == lt || cc_ == le) { ncr = GREATER; } else { - ASSERT(cc == gt || cc == ge); // remaining cases + ASSERT(cc_ == gt || cc_ == ge); // remaining cases ncr = LESS; } __ mov(r0, Operand(Smi::FromInt(ncr))); @@ -1870,9 +1842,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) // tagged as a small integer. __ InvokeBuiltin(native, JUMP_FUNCTION); - - __ bind(&miss); - GenerateMiss(masm); } @@ -2356,23 +2325,20 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) { } -void BinaryOpStub::Initialize() { - platform_specific_bit_ = CpuFeatures::IsSupported(VFP2); -} - - void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { Label get_result; __ Push(r1, r0); __ mov(r2, Operand(Smi::FromInt(MinorKey()))); - __ push(r2); + __ mov(r1, Operand(Smi::FromInt(op_))); + __ mov(r0, Operand(Smi::FromInt(operands_type_))); + __ Push(r2, r1, r0); __ TailCallExternalReference( ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()), - 3, + 5, 1); } @@ -2383,8 +2349,59 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( } -void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, - Token::Value op) { +void BinaryOpStub::Generate(MacroAssembler* masm) { + // Explicitly allow generation of nested stubs. It is safe here because + // generation code does not use any raw pointers. 
+ AllowStubCallsScope allow_stub_calls(masm, true); + + switch (operands_type_) { + case BinaryOpIC::UNINITIALIZED: + GenerateTypeTransition(masm); + break; + case BinaryOpIC::SMI: + GenerateSmiStub(masm); + break; + case BinaryOpIC::INT32: + GenerateInt32Stub(masm); + break; + case BinaryOpIC::HEAP_NUMBER: + GenerateHeapNumberStub(masm); + break; + case BinaryOpIC::ODDBALL: + GenerateOddballStub(masm); + break; + case BinaryOpIC::BOTH_STRING: + GenerateBothStringStub(masm); + break; + case BinaryOpIC::STRING: + GenerateStringStub(masm); + break; + case BinaryOpIC::GENERIC: + GenerateGeneric(masm); + break; + default: + UNREACHABLE(); + } +} + + +void BinaryOpStub::PrintName(StringStream* stream) { + const char* op_name = Token::Name(op_); + const char* overwrite_name; + switch (mode_) { + case NO_OVERWRITE: overwrite_name = "Alloc"; break; + case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; + case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; + default: overwrite_name = "UnknownOverwrite"; break; + } + stream->Add("BinaryOpStub_%s_%s_%s", + op_name, + overwrite_name, + BinaryOpIC::GetName(operands_type_)); +} + + +void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { Register left = r1; Register right = r0; Register scratch1 = r7; @@ -2394,7 +2411,7 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, STATIC_ASSERT(kSmiTag == 0); Label not_smi_result; - switch (op) { + switch (op_) { case Token::ADD: __ add(right, left, Operand(right), SetCC); // Add optimistically. __ Ret(vc); @@ -2509,24 +2526,10 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, } -void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, - Register result, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* gc_required, - OverwriteMode mode); - - -void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, - BinaryOpIC::TypeInfo left_type, - BinaryOpIC::TypeInfo right_type, - bool smi_operands, - Label* not_numbers, - Label* gc_required, - Label* miss, - Token::Value op, - OverwriteMode mode) { +void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, + bool smi_operands, + Label* not_numbers, + Label* gc_required) { Register left = r1; Register right = r0; Register scratch1 = r7; @@ -2538,17 +2541,11 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, __ AssertSmi(left); __ AssertSmi(right); } - if (left_type == BinaryOpIC::SMI) { - __ JumpIfNotSmi(left, miss); - } - if (right_type == BinaryOpIC::SMI) { - __ JumpIfNotSmi(right, miss); - } Register heap_number_map = r6; __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - switch (op) { + switch (op_) { case Token::ADD: case Token::SUB: case Token::MUL: @@ -2558,44 +2555,25 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, // depending on whether VFP3 is available or not. FloatingPointHelper::Destination destination = CpuFeatures::IsSupported(VFP2) && - op != Token::MOD ? + op_ != Token::MOD ? FloatingPointHelper::kVFPRegisters : FloatingPointHelper::kCoreRegisters; // Allocate new heap number for result. Register result = r5; - BinaryOpStub_GenerateHeapResultAllocation( - masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); + GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required); // Load the operands. if (smi_operands) { FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); } else { - // Load right operand to d7 or r2/r3. 
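// GenerateSmiSmiOperation's ADD above is "optimistic": it adds the two
// *tagged* words directly -- both tag bits are zero, so the sum is already a
// correctly tagged smi -- and returns immediately if the overflow flag is
// clear (the SetCC / Ret(vc) pair). A sketch of the same idea in host C++,
// with the GCC/Clang overflow builtin standing in for the V flag:
#include <cstdint>
#include <cstdio>

static bool SmiAdd(int32_t left_tagged, int32_t right_tagged, int32_t* result) {
  // Returns false on overflow, i.e. when the stub would fall through to
  // not_smi_result and retry with heap numbers.
  return !__builtin_add_overflow(left_tagged, right_tagged, result);
}

int main() {
  int32_t r;
  if (SmiAdd(3 << 1, 4 << 1, &r))
    std::printf("3 + 4 = %d (tagged 0x%x)\n", r >> 1, r);
  if (!SmiAdd(0x7FFFFFFE, 2 << 1, &r))  // max tagged smi + 4: overflows
    std::printf("overflow: fall through to not_smi_result\n");
}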
- if (right_type == BinaryOpIC::INT32) { - FloatingPointHelper::LoadNumberAsInt32Double( - masm, right, destination, d7, d8, r2, r3, heap_number_map, - scratch1, scratch2, s0, miss); - } else { - Label* fail = (right_type == BinaryOpIC::HEAP_NUMBER) ? miss - : not_numbers; - FloatingPointHelper::LoadNumber( - masm, destination, right, d7, r2, r3, heap_number_map, - scratch1, scratch2, fail); - } - // Load left operand to d6 or r0/r1. This keeps r0/r1 intact if it - // jumps to |miss|. - if (left_type == BinaryOpIC::INT32) { - FloatingPointHelper::LoadNumberAsInt32Double( - masm, left, destination, d6, d8, r0, r1, heap_number_map, - scratch1, scratch2, s0, miss); - } else { - Label* fail = (left_type == BinaryOpIC::HEAP_NUMBER) ? miss - : not_numbers; - FloatingPointHelper::LoadNumber( - masm, destination, left, d6, r0, r1, heap_number_map, - scratch1, scratch2, fail); - } + FloatingPointHelper::LoadOperands(masm, + destination, + heap_number_map, + scratch1, + scratch2, + not_numbers); } // Calculate the result. @@ -2604,7 +2582,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, // d6: Left value // d7: Right value CpuFeatures::Scope scope(VFP2); - switch (op) { + switch (op_) { case Token::ADD: __ vadd(d5, d6, d7); break; @@ -2628,7 +2606,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, } else { // Call the C function to handle the double operation. FloatingPointHelper::CallCCodeForDoubleOperation(masm, - op, + op_, result, scratch1); if (FLAG_debug_code) { @@ -2669,7 +2647,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, } Label result_not_a_smi; - switch (op) { + switch (op_) { case Token::BIT_OR: __ orr(r2, r3, Operand(r2)); break; @@ -2720,9 +2698,8 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, __ AllocateHeapNumber( result, scratch1, scratch2, heap_number_map, gc_required); } else { - BinaryOpStub_GenerateHeapResultAllocation( - masm, result, heap_number_map, scratch1, scratch2, gc_required, - mode); + GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required); } // r2: Answer as signed int32. @@ -2737,7 +2714,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, // mentioned above SHR needs to always produce a positive result. CpuFeatures::Scope scope(VFP2); __ vmov(s0, r2); - if (op == Token::SHR) { + if (op_ == Token::SHR) { __ vcvt_f64_u32(d0, s0); } else { __ vcvt_f64_s32(d0, s0); @@ -2762,14 +2739,12 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, // Generate the smi code. If the operation on smis is successful this return is // generated. If the result is not a smi and heap number allocation is not // requested the code falls through. If number allocation is requested but a -// heap number cannot be allocated the code jumps to the label gc_required. -void BinaryOpStub_GenerateSmiCode( +// heap number cannot be allocated the code jumps to the label gc_required. +void BinaryOpStub::GenerateSmiCode( MacroAssembler* masm, Label* use_runtime, Label* gc_required, - Token::Value op, - BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, - OverwriteMode mode) { + SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { Label not_smis; Register left = r1; @@ -2782,14 +2757,12 @@ void BinaryOpStub_GenerateSmiCode( __ JumpIfNotSmi(scratch1, &not_smis); // If the smi-smi operation results in a smi return is generated.
- BinaryOpStub_GenerateSmiSmiOperation(masm, op); + GenerateSmiSmiOperation(masm); // If heap number results are possible generate the result in an allocated // heap number. - if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { - BinaryOpStub_GenerateFPOperation( - masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, - use_runtime, gc_required, &not_smis, op, mode); + if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { + GenerateFPOperation(masm, true, use_runtime, gc_required); } __ bind(&not_smis); } @@ -2801,14 +2774,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { if (result_type_ == BinaryOpIC::UNINITIALIZED || result_type_ == BinaryOpIC::SMI) { // Only allow smi results. - BinaryOpStub_GenerateSmiCode( - masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_); + GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS); } else { // Allow heap number result and don't make a transition if a heap number // cannot be allocated. - BinaryOpStub_GenerateSmiCode( - masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, - mode_); + GenerateSmiCode(masm, + &call_runtime, + &call_runtime, + ALLOW_HEAPNUMBER_RESULTS); } // Code falls through if the result is not returned as either a smi or heap @@ -2816,14 +2789,23 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { GenerateTypeTransition(masm); __ bind(&call_runtime); - GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } +void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) { + ASSERT(operands_type_ == BinaryOpIC::STRING); + ASSERT(op_ == Token::ADD); + // Try to add arguments as strings, otherwise, transition to the generic + // BinaryOpIC type. + GenerateAddStrings(masm); + GenerateTypeTransition(masm); +} + + void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { Label call_runtime; - ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); + ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING); ASSERT(op_ == Token::ADD); // If both arguments are strings, call the string add stub. // Otherwise, do a transition. @@ -2852,7 +2834,7 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { - ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); + ASSERT(operands_type_ == BinaryOpIC::INT32); Register left = r1; Register right = r0; @@ -2874,7 +2856,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { Label skip; __ orr(scratch1, left, right); __ JumpIfNotSmi(scratch1, &skip); - BinaryOpStub_GenerateSmiSmiOperation(masm, op_); + GenerateSmiSmiOperation(masm); // Fall through if the result is not a smi. __ bind(&skip); @@ -2884,15 +2866,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { case Token::MUL: case Token::DIV: case Token::MOD: { - // It could be that only SMIs have been seen at either the left - // or the right operand. For precise type feedback, patch the IC - // again if this changes. - if (left_type_ == BinaryOpIC::SMI) { - __ JumpIfNotSmi(left, &transition); - } - if (right_type_ == BinaryOpIC::SMI) { - __ JumpIfNotSmi(right, &transition); - } // Load both operands and check that they are 32-bit integer. // Jump to type transition if they are not. The registers r0 and r1 (right // and left) are preserved for the runtime call. @@ -2991,13 +2964,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { : BinaryOpIC::INT32)) { // We are using vfp registers so r5 is available.
heap_number_result = r5; - BinaryOpStub_GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &call_runtime, - mode_); + GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime); __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); __ vstr(d5, r0, HeapNumber::kValueOffset); __ mov(r0, heap_number_result); @@ -3016,13 +2988,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { // Allocate a heap number to store the result. heap_number_result = r5; - BinaryOpStub_GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &pop_and_call_runtime, - mode_); + GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &pop_and_call_runtime); // Load the left value from the value saved on the stack. __ Pop(r1, r0); @@ -3127,13 +3098,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { __ bind(&return_heap_number); heap_number_result = r5; - BinaryOpStub_GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &call_runtime, - mode_); + GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime); if (CpuFeatures::IsSupported(VFP2)) { CpuFeatures::Scope scope(VFP2); @@ -3177,7 +3147,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { } __ bind(&call_runtime); - GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } @@ -3216,32 +3185,20 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { - Label call_runtime, transition; - BinaryOpStub_GenerateFPOperation( - masm, left_type_, right_type_, false, - &transition, &call_runtime, &transition, op_, mode_); - - __ bind(&transition); - GenerateTypeTransition(masm); + Label call_runtime; + GenerateFPOperation(masm, false, &call_runtime, &call_runtime); __ bind(&call_runtime); - GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { - Label call_runtime, call_string_add_or_runtime, transition; + Label call_runtime, call_string_add_or_runtime; - BinaryOpStub_GenerateSmiCode( - masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_); + GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); - BinaryOpStub_GenerateFPOperation( - masm, left_type_, right_type_, false, - &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_); - - __ bind(&transition); - GenerateTypeTransition(masm); + GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime); __ bind(&call_string_add_or_runtime); if (op_ == Token::ADD) { @@ -3249,7 +3206,6 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { } __ bind(&call_runtime); - GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } @@ -3285,20 +3241,61 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { } -void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, - Register result, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* gc_required, - OverwriteMode mode) { +void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { + GenerateRegisterArgsPush(masm); + switch (op_) { + case Token::ADD: + __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); + break; + case Token::SUB: + __ InvokeBuiltin(Builtins::SUB, 
JUMP_FUNCTION); + break; + case Token::MUL: + __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); + break; + case Token::DIV: + __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); + break; + case Token::MOD: + __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); + break; + case Token::BIT_OR: + __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); + break; + case Token::BIT_AND: + __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); + break; + case Token::BIT_XOR: + __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); + break; + case Token::SAR: + __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); + break; + case Token::SHR: + __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); + break; + case Token::SHL: + __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); + break; + default: + UNREACHABLE(); + } +} + + +void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm, + Register result, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* gc_required) { // Code below will scratch result if allocation fails. To keep both arguments // intact for the runtime call result cannot be one of these. ASSERT(!result.is(r0) && !result.is(r1)); - if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) { + if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) { Label skip_allocation, allocated; - Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0; + Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0; // If the overwritable operand is already an object, we skip the // allocation of a heap number. __ JumpIfNotSmi(overwritable_operand, &skip_allocation); @@ -3311,7 +3308,7 @@ void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, __ mov(result, Operand(overwritable_operand)); __ bind(&allocated); } else { - ASSERT(mode == NO_OVERWRITE); + ASSERT(mode_ == NO_OVERWRITE); __ AllocateHeapNumber( result, scratch1, scratch2, heap_number_map, gc_required); } @@ -4926,7 +4923,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // subject: Subject string // regexp_data: RegExp data (FixedArray) // r0: Instance type of subject string - STATIC_ASSERT(4 == kOneByteStringTag); + STATIC_ASSERT(4 == kAsciiStringTag); STATIC_ASSERT(kTwoByteStringTag == 0); // Find the code object based on the assumptions above. __ and_(r0, r0, Operand(kStringEncodingMask)); @@ -5150,7 +5147,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ ldr(subject, FieldMemOperand(subject, ExternalString::kResourceDataOffset)); // Move the pointer so that offset-wise, it looks like a sequential string. - STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); __ sub(subject, subject, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); @@ -5428,6 +5425,48 @@ void CallConstructStub::Generate(MacroAssembler* masm) { } +// Unfortunately you have to run without snapshots to see most of these +// names in the profile since most compare stubs end up in the snapshot. 
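// GenerateHeapResultAllocation below implements the overwrite optimization:
// when the stub is allowed to overwrite one operand (OVERWRITE_LEFT or
// OVERWRITE_RIGHT) and that operand is already a heap number, its existing
// box is reused for the result and no allocation happens; only a smi operand
// forces a fresh heap number. A sketch of that decision in host C++ (the
// Value struct is hypothetical; in the stub the smi test is a tag-bit check
// and the box is a HeapNumber):
#include <cstdio>

enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

struct Value {
  bool is_smi;
  double* box;  // non-null when the value is boxed as a "heap number"
};

static double* ResultBox(OverwriteMode mode, Value left, Value right,
                         double* freshly_allocated) {
  if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
    Value target = (mode == OVERWRITE_LEFT) ? left : right;
    if (!target.is_smi) return target.box;  // reuse the operand's box
  }
  // NO_OVERWRITE, or the overwritable operand was an unboxed smi.
  return freshly_allocated;
}

int main() {
  double left_box = 1.5, spare = 0.0;
  Value left = {false, &left_box}, right = {true, nullptr};
  double* result = ResultBox(OVERWRITE_LEFT, left, right, &spare);
  std::printf("reused left box: %s\n", result == &left_box ? "yes" : "no");
}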
+void CompareStub::PrintName(StringStream* stream) { + ASSERT((lhs_.is(r0) && rhs_.is(r1)) || + (lhs_.is(r1) && rhs_.is(r0))); + const char* cc_name; + switch (cc_) { + case lt: cc_name = "LT"; break; + case gt: cc_name = "GT"; break; + case le: cc_name = "LE"; break; + case ge: cc_name = "GE"; break; + case eq: cc_name = "EQ"; break; + case ne: cc_name = "NE"; break; + default: cc_name = "UnknownCondition"; break; + } + bool is_equality = cc_ == eq || cc_ == ne; + stream->Add("CompareStub_%s", cc_name); + stream->Add(lhs_.is(r0) ? "_r0" : "_r1"); + stream->Add(rhs_.is(r0) ? "_r0" : "_r1"); + if (strict_ && is_equality) stream->Add("_STRICT"); + if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN"); + if (!include_number_compare_) stream->Add("_NO_NUMBER"); + if (!include_smi_compare_) stream->Add("_NO_SMI"); +} + + +int CompareStub::MinorKey() { + // Encode the three parameters in a unique 16 bit value. To avoid duplicate + // stubs the never NaN NaN condition is only taken into account if the + // condition is equals. + ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12)); + ASSERT((lhs_.is(r0) && rhs_.is(r1)) || + (lhs_.is(r1) && rhs_.is(r0))); + return ConditionField::encode(static_cast<unsigned>(cc_) >> 28) + | RegisterField::encode(lhs_.is(r0)) + | StrictField::encode(strict_) + | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false) + | IncludeNumberCompareField::encode(include_number_compare_) + | IncludeSmiCompareField::encode(include_smi_compare_); +} + + // StringCharCodeAtGenerator void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { Label flat_string; @@ -5877,7 +5916,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Check if the two characters match. // Assumes that word load is little endian. - __ ldrh(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize)); + __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize)); __ cmp(chars, scratch); __ b(eq, &found_in_symbol_table); __ bind(&next_probe[i]); @@ -5960,28 +5999,23 @@ void SubStringStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - // Arithmetic shift right by one un-smi-tags. In this case we rotate right - // instead because we bail out on non-smi values: ROR and ASR are equivalent - // for smis but they set the flags in a way that's easier to optimize. - __ mov(r2, Operand(r2, ROR, 1), SetCC); - __ mov(r3, Operand(r3, ROR, 1), SetCC, cc); - // If either to or from had the smi tag bit set, then C is set now, and N - // has the same value: we rotated by 1, so the bottom bit is now the top bit. + // I.e., arithmetic shift right by one un-smi-tags. + __ mov(r2, Operand(r2, ASR, 1), SetCC); + __ mov(r3, Operand(r3, ASR, 1), SetCC, cc); + // If either to or from had the smi tag bit set, then carry is set now. + __ b(cs, &runtime); // Either "from" or "to" is not a smi. // We want to bailout to runtime here if From is negative. In that case, the // next instruction is not executed and we fall through to bailing out to - // runtime. - // Executed if both r2 and r3 are untagged integers. - __ sub(r2, r2, Operand(r3), SetCC, cc); - // One of the above un-smis or the above SUB could have set N==1. - __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to. + // runtime. pl is the opposite of mi. + // Both r2 and r3 are untagged integers. + __ sub(r2, r2, Operand(r3), SetCC, pl); + __ b(mi, &runtime); // Fail if from > to. // Make sure first argument is a string.
__ ldr(r0, MemOperand(sp, kStringOffset)); STATIC_ASSERT(kSmiTag == 0); - // Do a JumpIfSmi, but fold its jump into the subsequent string test. - __ tst(r0, Operand(kSmiTagMask)); - Condition is_string = masm->IsObjectStringType(r0, r1, ne); - ASSERT(is_string == eq); + __ JumpIfSmi(r0, &runtime); + Condition is_string = masm->IsObjectStringType(r0, r1); __ b(NegateCondition(is_string), &runtime); // Short-cut for the case of trivial substring. @@ -6052,7 +6086,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { // string's encoding is wrong because we always have to recheck encoding of // the newly created string's parent anyways due to externalized strings. Label two_byte_slice, set_slice_header; - STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); + STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); __ tst(r1, Operand(kStringEncodingMask)); __ b(eq, &two_byte_slice); @@ -6090,12 +6124,12 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ bind(&sequential_string); // Locate first character of underlying subject string. - STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); - __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); + __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); __ bind(&allocate_result); // Sequential ascii string. Allocate the result. - STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); + STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0); __ tst(r1, Operand(kStringEncodingMask)); __ b(eq, &two_byte_sequential); @@ -6105,13 +6139,13 @@ void SubStringStub::Generate(MacroAssembler* masm) { // Locate first character of substring to copy. __ add(r5, r5, r3); // Locate first character of result. - __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); // r0: result string // r1: first character of result string // r2: result string length // r5: first character of substring to copy - STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); + STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, COPY_ASCII | DEST_ALWAYS_ALIGNED); __ jmp(&return_r0); @@ -6236,7 +6270,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop( // doesn't need an additional compare. __ SmiUntag(length); __ add(scratch1, length, - Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); __ add(left, left, Operand(scratch1)); __ add(right, right, Operand(scratch1)); __ rsb(length, length, Operand::Zero()); @@ -6389,8 +6423,8 @@ void StringAddStub::Generate(MacroAssembler* masm) { &call_runtime); // Get the two characters forming the sub string. - __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize)); - __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize)); + __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); + __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize)); // Try to lookup two character string in symbol table. If it is not found // just allocate a new one.
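// The two-character probe above (see the ldrh in
// GenerateTwoCharacterSymbolTableProbe) compares candidate strings with a
// single halfword load: both characters are packed little-endian, matching
// how ldrh reads the first two bytes of a sequential string. A host-side
// sketch of the packing and comparison (not v8 code; assumes a little-endian
// host, just as the stub's comment does):
#include <cstdint>
#include <cstring>
#include <cstdio>

static uint16_t PackTwoChars(char c1, char c2) {
  return static_cast<uint16_t>(static_cast<uint8_t>(c1)) |
         (static_cast<uint16_t>(static_cast<uint8_t>(c2)) << 8);
}

int main() {
  const char* candidate = "ok";
  uint16_t chars = PackTwoChars('o', 'k');
  uint16_t loaded;                      // what the ldrh would read
  std::memcpy(&loaded, candidate, 2);
  std::printf("match: %s\n", chars == loaded ? "yes" : "no");
}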
@@ -6409,7 +6443,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // in a little endian mode) __ mov(r6, Operand(2)); __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); - __ strh(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize)); + __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); __ IncrementCounter(counters->string_add_native(), 1, r2, r3); __ add(sp, sp, Operand(2 * kPointerSize)); __ Ret(); @@ -6459,6 +6493,11 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ tst(r4, Operand(kAsciiDataHintMask)); __ tst(r5, Operand(kAsciiDataHintMask), ne); __ b(ne, &ascii_data); + __ eor(r4, r4, Operand(r5)); + STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); + __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); + __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); + __ b(eq, &ascii_data); // Allocate a two byte cons string. __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime); @@ -6491,10 +6530,10 @@ void StringAddStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(kSeqStringTag == 0); __ tst(r4, Operand(kStringRepresentationMask)); - STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); + STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); __ add(r7, r0, - Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag), LeaveCC, eq); __ b(eq, &first_prepared); @@ -6507,10 +6546,10 @@ void StringAddStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(kSeqStringTag == 0); __ tst(r5, Operand(kStringRepresentationMask)); - STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); + STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); __ add(r1, r1, - Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag), LeaveCC, eq); __ b(eq, &second_prepared); @@ -6533,7 +6572,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ b(eq, &non_ascii_string_add_flat_result); __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); - __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + __ add(r6, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); // r0: result string. // r7: first character of first string. // r1: first character of second string. @@ -6624,7 +6663,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, void ICCompareStub::GenerateSmis(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SMI); + ASSERT(state_ == CompareIC::SMIS); Label miss; __ orr(r2, r1, r0); __ JumpIfNotSmi(r2, &miss); @@ -6645,53 +6684,31 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) { void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::HEAP_NUMBER); + ASSERT(state_ == CompareIC::HEAP_NUMBERS); Label generic_stub; Label unordered, maybe_undefined1, maybe_undefined2; Label miss; + __ and_(r2, r1, Operand(r0)); + __ JumpIfSmi(r2, &generic_stub); - if (left_ == CompareIC::SMI) { - __ JumpIfNotSmi(r1, &miss); - } - if (right_ == CompareIC::SMI) { - __ JumpIfNotSmi(r0, &miss); - } + __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE); + __ b(ne, &maybe_undefined1); + __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); + __ b(ne, &maybe_undefined2); // Inlining the double comparison and falling back to the general compare - // stub if NaN is involved or VFP2 is unsupported. + // stub if NaN is involved or VFP3 is unsupported. 
if (CpuFeatures::IsSupported(VFP2)) { CpuFeatures::Scope scope(VFP2); - // Load left and right operand. - Label done, left, left_smi, right_smi; - __ JumpIfSmi(r0, &right_smi); - __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, - DONT_DO_SMI_CHECK); - __ sub(r2, r0, Operand(kHeapObjectTag)); - __ vldr(d1, r2, HeapNumber::kValueOffset); - __ b(&left); - __ bind(&right_smi); - __ SmiUntag(r2, r0); // Can't clobber r0 yet. - SwVfpRegister single_scratch = d2.low(); - __ vmov(single_scratch, r2); - __ vcvt_f64_s32(d1, single_scratch); - - __ bind(&left); - __ JumpIfSmi(r1, &left_smi); - __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, - DONT_DO_SMI_CHECK); + // Load left and right operand __ sub(r2, r1, Operand(kHeapObjectTag)); __ vldr(d0, r2, HeapNumber::kValueOffset); - __ b(&done); - __ bind(&left_smi); - __ SmiUntag(r2, r1); // Can't clobber r1 yet. - single_scratch = d3.low(); - __ vmov(single_scratch, r2); - __ vcvt_f64_s32(d0, single_scratch); + __ sub(r2, r0, Operand(kHeapObjectTag)); + __ vldr(d1, r2, HeapNumber::kValueOffset); - __ bind(&done); - // Compare operands. + // Compare operands __ VFPCompareAndSetFlags(d0, d1); // Don't base result on status bits when a NaN is involved. @@ -6705,16 +6722,14 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { } __ bind(&unordered); + CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0); __ bind(&generic_stub); - ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, - CompareIC::GENERIC); __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); __ bind(&maybe_undefined1); if (Token::IsOrderedRelationalCompareOp(op_)) { __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); __ b(ne, &miss); - __ JumpIfSmi(r1, &unordered); __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); __ b(ne, &maybe_undefined2); __ jmp(&unordered); @@ -6732,7 +6747,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SYMBOL); + ASSERT(state_ == CompareIC::SYMBOLS); Label miss; // Registers containing left and right operands respectively. @@ -6770,7 +6785,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { void ICCompareStub::GenerateStrings(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::STRING); + ASSERT(state_ == CompareIC::STRINGS); Label miss; bool equality = Token::IsEqualityOp(op_); @@ -6848,7 +6863,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { void ICCompareStub::GenerateObjects(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::OBJECT); + ASSERT(state_ == CompareIC::OBJECTS); Label miss; __ and_(r2, r1, Operand(r0)); __ JumpIfSmi(r2, &miss); @@ -7381,7 +7396,12 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { ASSERT(!address.is(r0)); __ Move(address, regs_.address()); __ Move(r0, regs_.object()); - __ Move(r1, address); + if (mode == INCREMENTAL_COMPACTION) { + __ Move(r1, address); + } else { + ASSERT(mode == INCREMENTAL); + __ ldr(r1, MemOperand(address, 0)); + } __ mov(r2, Operand(ExternalReference::isolate_address())); AllowExternalCallThatCantCauseGC scope(masm); @@ -7539,7 +7559,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS. 
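// When the VFP compare above is unordered (a NaN is involved), the stub does
// not trust the status bits; it loads whichever value makes the comparison
// fail -- GREATER for lt/le, LESS for gt/ge -- as also done in
// CompareStub::Generate. A sketch of the same policy in host C++ (not v8
// code; is_lt_or_le stands in for the stub's cc_ condition):
#include <cstdio>
#include <limits>

enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };

static int CompareDoubles(double lhs, double rhs, bool is_lt_or_le) {
  if (lhs == rhs) return EQUAL;
  if (lhs < rhs) return LESS;
  if (lhs > rhs) return GREATER;
  return is_lt_or_le ? GREATER : LESS;  // unordered: force the test to fail
}

int main() {
  double nan = std::numeric_limits<double>::quiet_NaN();
  // NaN < 1.0 must be false, so the unordered case reports GREATER here.
  std::printf("NaN vs 1.0 under lt: %d\n", CompareDoubles(nan, 1.0, true));
}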
__ bind(&double_elements); __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); - __ StoreNumberToDoubleElements(r0, r3, + __ StoreNumberToDoubleElements(r0, r3, r1, // Overwrites all regs after this. r5, r6, r7, r9, r2, &slow_elements); @@ -7549,7 +7569,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { if (entry_hook_ != NULL) { - PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize); + PredictableCodeSizeScope predictable(masm); ProfileEntryHookStub stub; __ push(lr); __ CallStub(&stub); diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h index 0443cf7..3e79624 100644 --- a/deps/v8/src/arm/code-stubs-arm.h +++ b/deps/v8/src/arm/code-stubs-arm.h @@ -142,6 +142,108 @@ class UnaryOpStub: public CodeStub { }; +class BinaryOpStub: public CodeStub { + public: + BinaryOpStub(Token::Value op, OverwriteMode mode) + : op_(op), + mode_(mode), + operands_type_(BinaryOpIC::UNINITIALIZED), + result_type_(BinaryOpIC::UNINITIALIZED) { + use_vfp2_ = CpuFeatures::IsSupported(VFP2); + ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); + } + + BinaryOpStub( + int key, + BinaryOpIC::TypeInfo operands_type, + BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED) + : op_(OpBits::decode(key)), + mode_(ModeBits::decode(key)), + use_vfp2_(VFP2Bits::decode(key)), + operands_type_(operands_type), + result_type_(result_type) { } + + private: + enum SmiCodeGenerateHeapNumberResults { + ALLOW_HEAPNUMBER_RESULTS, + NO_HEAPNUMBER_RESULTS + }; + + Token::Value op_; + OverwriteMode mode_; + bool use_vfp2_; + + // Operand type information determined at runtime. + BinaryOpIC::TypeInfo operands_type_; + BinaryOpIC::TypeInfo result_type_; + + virtual void PrintName(StringStream* stream); + + // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM. 
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {}; + class OpBits: public BitField<Token::Value, 2, 7> {}; + class VFP2Bits: public BitField<bool, 9, 1> {}; + class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {}; + class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {}; + + Major MajorKey() { return BinaryOp; } + int MinorKey() { + return OpBits::encode(op_) + | ModeBits::encode(mode_) + | VFP2Bits::encode(use_vfp2_) + | OperandTypeInfoBits::encode(operands_type_) + | ResultTypeInfoBits::encode(result_type_); + } + + void Generate(MacroAssembler* masm); + void GenerateGeneric(MacroAssembler* masm); + void GenerateSmiSmiOperation(MacroAssembler* masm); + void GenerateFPOperation(MacroAssembler* masm, + bool smi_operands, + Label* not_numbers, + Label* gc_required); + void GenerateSmiCode(MacroAssembler* masm, + Label* use_runtime, + Label* gc_required, + SmiCodeGenerateHeapNumberResults heapnumber_results); + void GenerateLoadArguments(MacroAssembler* masm); + void GenerateReturn(MacroAssembler* masm); + void GenerateUninitializedStub(MacroAssembler* masm); + void GenerateSmiStub(MacroAssembler* masm); + void GenerateInt32Stub(MacroAssembler* masm); + void GenerateHeapNumberStub(MacroAssembler* masm); + void GenerateOddballStub(MacroAssembler* masm); + void GenerateStringStub(MacroAssembler* masm); + void GenerateBothStringStub(MacroAssembler* masm); + void GenerateGenericStub(MacroAssembler* masm); + void GenerateAddStrings(MacroAssembler* masm); + void GenerateCallRuntime(MacroAssembler* masm); + + void GenerateHeapResultAllocation(MacroAssembler* masm, + Register result, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* gc_required); + void GenerateRegisterArgsPush(MacroAssembler* masm); + void GenerateTypeTransition(MacroAssembler* masm); + void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm); + + virtual int GetCodeKind() { return Code::BINARY_OP_IC; } + + virtual InlineCacheState GetICState() { + return BinaryOpIC::ToState(operands_type_); + } + + virtual void FinishCode(Handle<Code> code) { + code->set_binary_op_type(operands_type_); + code->set_binary_op_result_type(result_type_); + } + + friend class CodeGenerator; +}; + + class StringHelper : public AllStatic { public: // Generate code for copying characters using a simple loop. This should only @@ -622,6 +724,20 @@ class FloatingPointHelper : public AllStatic { Register scratch1, Register scratch2); + // Loads objects from r0 and r1 (right and left in binary operations) into + // floating point registers. Depending on the destination the values end up + // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination + // is floating point registers VFP3 must be supported. If core registers are + // requested when VFP3 is supported d6 and d7 will still be scratched. If + // either r0 or r1 is not a number (not smi and not heap number object) the + // not_number label is jumped to with r0 and r1 intact. + static void LoadOperands(MacroAssembler* masm, + FloatingPointHelper::Destination destination, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* not_number); + // Convert the smi or heap number in object to an int32 using the rules // for ToInt32 as described in ECMAScript 9.5.: the value is truncated // and brought into the range -2^31 .. +2^31 - 1. @@ -720,12 +836,7 @@ class FloatingPointHelper : public AllStatic { Register heap_number_result, Register scratch); - // Loads the objects from |object| into floating point registers.
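// The minor-key packing above ("RRRTTTVOOOOOOOMM": mode 2 bits, op 7, vfp2
// flag 1, operand type 3, result type 3 = 16 bits) is built from v8's
// BitField template. A minimal self-contained version of that encode/decode
// round trip (a sketch in the spirit of v8's BitField, not a copy of it;
// like the original, encode trusts its caller to pass an in-range value):
#include <cstdint>
#include <cstdio>

template <typename T, int shift, int size>
struct BitField {
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key >> shift) & ((1u << size) - 1));
  }
};

// Field positions follow the RRRTTTVOOOOOOOMM comment.
using ModeBits = BitField<int, 0, 2>;
using OpBits = BitField<int, 2, 7>;
using VFP2Bits = BitField<bool, 9, 1>;

int main() {
  uint32_t key = ModeBits::encode(1) | OpBits::encode(42) | VFP2Bits::encode(true);
  std::printf("mode=%d op=%d vfp2=%d\n",
              ModeBits::decode(key), OpBits::decode(key),
              static_cast<int>(VFP2Bits::decode(key)));
}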
- // Depending on |destination| the value ends up either in |dst| or
- // in |dst1|/|dst2|. If |destination| is kVFPRegisters, then VFP3
- // must be supported. If kCoreRegisters are requested and VFP3 is
- // supported, |dst| will be scratched. If |object| is neither smi nor
- // heap number, |not_number| is jumped to with |object| still intact.
+ private:
static void LoadNumber(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register object,
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index bb771b1..09166c3 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -31,11 +31,11 @@
#include "codegen.h"
#include "macro-assembler.h"
-#include "simulator-arm.h"

namespace v8 {
namespace internal {

+#define __ ACCESS_MASM(masm)

UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
switch (type) {
@@ -49,74 +49,6 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
}

-#define __ masm.
-
-
-#if defined(USE_SIMULATOR)
-byte* fast_exp_arm_machine_code = NULL;
-double fast_exp_simulator(double x) {
- return Simulator::current(Isolate::Current())->CallFP(
- fast_exp_arm_machine_code, x, 0);
-}
-#endif
-
-
-UnaryMathFunction CreateExpFunction() {
- if (!CpuFeatures::IsSupported(VFP2)) return &exp;
- if (!FLAG_fast_math) return &exp;
- size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
-
- {
- CpuFeatures::Scope use_vfp(VFP2);
- DoubleRegister input = d0;
- DoubleRegister result = d1;
- DoubleRegister double_scratch1 = d2;
- DoubleRegister double_scratch2 = d3;
- Register temp1 = r4;
- Register temp2 = r5;
- Register temp3 = r6;
-
- if (masm.use_eabi_hardfloat()) {
- // Input value is in d0 anyway, nothing to do.
- } else {
- __ vmov(input, r0, r1);
- }
- __ Push(temp3, temp2, temp1);
- MathExpGenerator::EmitMathExp(
- &masm, input, result, double_scratch1, double_scratch2,
- temp1, temp2, temp3);
- __ Pop(temp3, temp2, temp1);
- if (masm.use_eabi_hardfloat()) {
- __ vmov(d0, result);
- } else {
- __ vmov(r0, r1, result);
- }
- __ Ret();
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-#else
- fast_exp_arm_machine_code = buffer;
- return &fast_exp_simulator;
-#endif
-}
-
-
-#undef __
-
-
UnaryMathFunction CreateSqrtFunction() {
return &sqrt;
}
@@ -141,8 +73,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators

-#define __ ACCESS_MASM(masm)
-
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -262,7 +192,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
HeapObject::kMapOffset,
r3,
r9,
- kLRHasNotBeenSaved,
+ kLRHasBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -486,7 +416,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ b(ne, &external_string);

// Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
__ add(string,
string,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -520,188 +450,8 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&done);
}

-
-void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value) {
- if (FLAG_debug_code) {
- __ tst(index, Operand(kSmiTagMask));
- __ Check(eq, "Non-smi index");
- __ tst(value, Operand(kSmiTagMask));
- __ Check(eq, "Non-smi value");
-
- __ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
- __ cmp(index, ip);
- __ Check(lt, "Index is too large");
-
- __ cmp(index, Operand(Smi::FromInt(0)));
- __ Check(ge, "Index is negative");
-
- __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
-
- __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(eq, "Unexpected string type");
- }
-
- __ add(ip,
- string,
- Operand(SeqString::kHeaderSize - kHeapObjectTag));
- __ SmiUntag(value, value);
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- if (encoding == String::ONE_BYTE_ENCODING) {
- // Smis are tagged by left shift by 1, thus LSR by 1 to smi-untag inline.
- __ strb(value, MemOperand(ip, index, LSR, 1));
- } else {
- // No need to untag a smi for two-byte addressing.
- __ strh(value, MemOperand(ip, index)); - } -} - - -static MemOperand ExpConstant(int index, Register base) { - return MemOperand(base, index * kDoubleSize); -} - - -void MathExpGenerator::EmitMathExp(MacroAssembler* masm, - DoubleRegister input, - DoubleRegister result, - DoubleRegister double_scratch1, - DoubleRegister double_scratch2, - Register temp1, - Register temp2, - Register temp3) { - ASSERT(!input.is(result)); - ASSERT(!input.is(double_scratch1)); - ASSERT(!input.is(double_scratch2)); - ASSERT(!result.is(double_scratch1)); - ASSERT(!result.is(double_scratch2)); - ASSERT(!double_scratch1.is(double_scratch2)); - ASSERT(!temp1.is(temp2)); - ASSERT(!temp1.is(temp3)); - ASSERT(!temp2.is(temp3)); - ASSERT(ExternalReference::math_exp_constants(0).address() != NULL); - - Label done; - - __ mov(temp3, Operand(ExternalReference::math_exp_constants(0))); - - __ vldr(double_scratch1, ExpConstant(0, temp3)); - __ vmov(result, kDoubleRegZero); - __ VFPCompareAndSetFlags(double_scratch1, input); - __ b(ge, &done); - __ vldr(double_scratch2, ExpConstant(1, temp3)); - __ VFPCompareAndSetFlags(input, double_scratch2); - __ vldr(result, ExpConstant(2, temp3)); - __ b(ge, &done); - __ vldr(double_scratch1, ExpConstant(3, temp3)); - __ vldr(result, ExpConstant(4, temp3)); - __ vmul(double_scratch1, double_scratch1, input); - __ vadd(double_scratch1, double_scratch1, result); - __ vmov(temp2, temp1, double_scratch1); - __ vsub(double_scratch1, double_scratch1, result); - __ vldr(result, ExpConstant(6, temp3)); - __ vldr(double_scratch2, ExpConstant(5, temp3)); - __ vmul(double_scratch1, double_scratch1, double_scratch2); - __ vsub(double_scratch1, double_scratch1, input); - __ vsub(result, result, double_scratch1); - __ vmul(input, double_scratch1, double_scratch1); - __ vmul(result, result, input); - __ mov(temp1, Operand(temp2, LSR, 11)); - __ vldr(double_scratch2, ExpConstant(7, temp3)); - __ vmul(result, result, double_scratch2); - __ vsub(result, result, double_scratch1); - __ vldr(double_scratch2, ExpConstant(8, temp3)); - __ vadd(result, result, double_scratch2); - __ movw(ip, 0x7ff); - __ and_(temp2, temp2, Operand(ip)); - __ add(temp1, temp1, Operand(0x3ff)); - __ mov(temp1, Operand(temp1, LSL, 20)); - - // Must not call ExpConstant() after overwriting temp3! 
- __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ ldr(ip, MemOperand(temp3, temp2, LSL, 3));
- __ add(temp3, temp3, Operand(kPointerSize));
- __ ldr(temp2, MemOperand(temp3, temp2, LSL, 3));
- __ orr(temp1, temp1, temp2);
- __ vmov(input, ip, temp1);
- __ vmul(result, result, input);
- __ bind(&done);
-}
-
#undef __

-// add(r0, pc, Operand(-8))
-static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
-
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found in FUNCTIONS
- static bool initialized = false;
- static uint32_t sequence[kNoCodeAgeSequenceLength];
- byte* byte_sequence = reinterpret_cast<byte*>(sequence);
- *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
- if (!initialized) {
- CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
- PredictableCodeSizeScope scope(patcher.masm(), *length);
- patcher.masm()->stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- patcher.masm()->LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- patcher.masm()->add(fp, sp, Operand(2 * kPointerSize));
- initialized = true;
- }
- return byte_sequence;
-}
-
-
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = !memcmp(sequence, young_sequence, young_length);
- ASSERT(result ||
- Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
- return result;
-}
-
-
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
- *age = kNoAge;
- *parity = NO_MARKING_PARITY;
- } else {
- Address target_address = Memory::Address_at(
- sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
-}
-
-
-void Code::PatchPlatformCodeAge(byte* sequence,
- Code::Age age,
- MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
- memcpy(sequence, young_sequence, young_length);
- CPU::FlushICache(sequence, young_length);
- } else {
- Code* stub = GetCodeAgeStub(age, parity);
- CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
- patcher.masm()->add(r0, pc, Operand(-8));
- patcher.masm()->ldr(pc, MemOperand(pc, -4));
- patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
- }
-}
-
-
} } // namespace v8::internal

#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index 8f0033e..c340e6b 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -88,22 +88,6 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};

-
-class MathExpGenerator : public AllStatic {
- public:
- static void EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
} } // namespace v8::internal

#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index a569383..4fa49e3 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -84,18 +84,9 @@
namespace v8 {
namespace internal
{

// Constant pool marker.
-// Use UDF, the permanently undefined instruction.
-const int kConstantPoolMarkerMask = 0xfff000f0;
-const int kConstantPoolMarker = 0xe7f000f0;
-const int kConstantPoolLengthMaxMask = 0xffff;
-inline int EncodeConstantPoolLength(int length) {
- ASSERT((length & kConstantPoolLengthMaxMask) == length);
- return ((length & 0xfff0) << 4) | (length & 0xf);
-}
-inline int DecodeConstantPoolLength(int instr) {
- ASSERT((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
- return ((instr >> 4) & 0xfff0) | (instr & 0xf);
-}
+const int kConstantPoolMarkerMask = 0xffe00000;
+const int kConstantPoolMarker = 0x0c000000;
+const int kConstantPoolLengthMask = 0x001ffff;

// Number of registers in normal ARM mode.
const int kNumRegisters = 16;
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index ee2a581..19667b9 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -114,6 +114,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}

+static const int32_t kBranchBeforeStackCheck = 0x2a000001;
static const int32_t kBranchBeforeInterrupt = 0x5a000004;

@@ -122,21 +123,24 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
- // The back edge bookkeeping code matches the pattern:
- //
- // <decrement profiling counter>
- // 2a 00 00 01 bpl ok
+ // The call of the stack guard check has the following form:
+ // e1 5d 00 0c cmp sp, <limit>
+ // 2a 00 00 01 bcs ok
// e5 9f c? ?? ldr ip, [pc, <stack guard address>]
// e1 2f ff 3c blx ip
ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
+ if (FLAG_count_based_interrupts) {
+ ASSERT_EQ(kBranchBeforeInterrupt,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ } else {
+ ASSERT_EQ(kBranchBeforeStackCheck,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ }

// We patch the code to the following form:
- //
- // <decrement profiling counter>
+ // e1 5d 00 0c cmp sp, <limit>
// e1 a0 00 00 mov r0, r0 (NOP)
// e5 9f c? ?? ldr ip, [pc, <stack guard address>]
// e1 2f ff 3c blx ip
@@ -173,9 +177,15 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,

// Replace NOP with conditional jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->b(+16, pl);
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
+ if (FLAG_count_based_interrupts) {
+ patcher.masm()->b(+16, pl);
+ ASSERT_EQ(kBranchBeforeInterrupt,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ } else {
+ patcher.masm()->b(+4, cs);
+ ASSERT_EQ(kBranchBeforeStackCheck,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ }

// Replace the stack check address in the constant pool
// with the entry address of the replacement code.
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index cb0a6cb..3c94a46 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -1098,7 +1098,6 @@ int Decoder::DecodeType7(Instruction* instr) {
// Dd = vadd(Dn, Dm)
// Dd = vsub(Dn, Dm)
// Dd = vmul(Dn, Dm)
-// Dd = vmla(Dn, Dm)
// Dd = vdiv(Dn, Dm)
// vcmp(Dd, Dm)
// vmrs
@@ -1161,12 +1160,6 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
} else {
Unknown(instr); // Not used by V8.
}
- } else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) {
- if (instr->SzValue() == 0x1) {
- Format(instr, "vmla.f64'cond 'Dd, 'Dn, 'Dm");
- } else {
- Unknown(instr); // Not used by V8.
- }
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
@@ -1395,7 +1388,7 @@ bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
int Decoder::ConstantPoolSizeAt(byte* instr_ptr) {
if (IsConstantPoolAt(instr_ptr)) {
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
- return DecodeConstantPoolLength(instruction_bits);
+ return instruction_bits & kConstantPoolLengthMask;
} else {
return -1;
}
@@ -1417,7 +1410,8 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"constant pool begin (length %d)",
- DecodeConstantPoolLength(instruction_bits));
+ instruction_bits &
+ kConstantPoolLengthMask);
return Instruction::kInstrSize;
}
switch (instr->TypeValue()) {
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 3b560fe..be82283 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -130,7 +130,7 @@ void FullCodeGenerator::Generate() {
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Object>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+ Handle<Object>(Smi::FromInt(FLAG_interrupt_budget)));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -164,19 +164,14 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();

- info->set_prologue_offset(masm_->pc_offset());
- {
- PredictableCodeSizeScope predictible_code_size_scope(
- masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
- // The following three instructions must remain together and unmodified
- // for code aging to work properly.
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ __ Push(lr, fp, cp, r1);
+ if (locals_count > 0) {
// Load undefined value here, so the value is ready for the loop
// below.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
}
+ // Adjust fp to point to caller's fp.
+ __ add(fp, sp, Operand(2 * kPointerSize));

{ Comment cmnt(masm_, "[ Allocate locals");
for (int i = 0; i < locals_count; i++) {
@@ -292,7 +287,7 @@ void FullCodeGenerator::Generate() {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
- PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
+ PredictableCodeSizeScope predictable(masm_);
StackCheckStub stub;
__ CallStub(&stub);
__ bind(&ok);
@@ -347,31 +342,42 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}

-void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Back edge bookkeeping");
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Stack check");
// Block literal pools whilst emitting stack check code.
Assembler::BlockConstPoolScope block_const_pool(masm_);
Label ok;

- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
+ if (FLAG_count_based_interrupts) {
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceUnit));
+ }
+ EmitProfilingCounterDecrement(weight);
+ __ b(pl, &ok);
+ InterruptStub stub;
+ __ CallStub(&stub);
+ } else {
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+ PredictableCodeSizeScope predictable(masm_);
+ StackCheckStub stub;
+ __ CallStub(&stub);
}
- EmitProfilingCounterDecrement(weight);
- __ b(pl, &ok);
- InterruptStub stub;
- __ CallStub(&stub);

// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
+ RecordStackCheck(stmt->OsrEntryId());

- EmitProfilingCounterReset();
+ if (FLAG_count_based_interrupts) {
+ EmitProfilingCounterReset();
+ }

__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -433,8 +439,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// tool from instrumenting as we rely on the code size here.
int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
- // TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
- PredictableCodeSizeScope predictable(masm_, -1);
+ PredictableCodeSizeScope predictable(masm_);
__ RecordJSReturn();
masm_->mov(sp, fp);
masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
@@ -909,33 +914,34 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- Variable* variable = declaration->proxy()->var();
- ASSERT(variable->location() == Variable::CONTEXT);
- ASSERT(variable->interface()->IsFrozen());
-
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ Handle<JSModule> instance = declaration->module()->interface()->Instance();
+ ASSERT(!instance.is_null());

- // Load instance object.
- __ LoadContext(r1, scope_->ContextChainLength(scope_->GlobalScope()));
- __ ldr(r1, ContextOperand(r1, variable->interface()->Index()));
- __ ldr(r1, ContextOperand(r1, Context::EXTENSION_INDEX));
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ globals_->Add(variable->name(), zone());
+ globals_->Add(instance, zone());
+ Visit(declaration->module());
+ break;
+ }

- // Assign it.
- __ str(r1, ContextOperand(cp, variable->index()));
- // We know that we have written a module, which is not a smi.
- __ RecordWriteContextSlot(cp,
- Context::SlotOffset(variable->index()),
- r1,
- r3,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ __ mov(r1, Operand(instance));
+ __ str(r1, ContextOperand(cp, variable->index()));
+ Visit(declaration->module());
+ break;
+ }

- // Traverse into body.
- Visit(declaration->module());
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::LOOKUP:
+ UNREACHABLE();
+ }
}

@@ -978,14 +984,6 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}

-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
- // Return value is ignored.
-}
-
-
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -1240,7 +1238,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ add(r0, r0, Operand(Smi::FromInt(1)));
__ push(r0);

- EmitBackEdgeBookkeeping(stmt, &loop);
+ EmitStackCheck(stmt, &loop);
__ b(&loop);

// Remove the pointers stored on the stack.
@@ -1393,9 +1391,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == CONST ||
+ local->mode() == CONST_HARMONY ||
+ local->mode() == LET) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
if (local->mode() == CONST) {
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
@@ -2376,7 +2374,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();

- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval()) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the
// call.
Then we call the resolved function using the given
@@ -3131,39 +3129,6 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}

-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(r2);
- __ pop(r1);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(r2);
- __ pop(r1);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
- context()->Plug(r0);
-}
-
-
-
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
@@ -3656,7 +3621,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
+ __ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
__ add(string_length, string_length, Operand(scratch1), SetCC);
__ b(vs, &bailout);
__ cmp(element, elements_end);
@@ -3685,7 +3650,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
// smi but the other values are, so the result is a smi
- __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
__ sub(string_length, string_length, Operand(scratch1));
__ smull(scratch2, ip, array_length, scratch1);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
@@ -3723,10 +3688,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
array_length = no_reg;
__ add(result_pos,
result,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));

// Check the length of the separator.
- __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); + __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset)); __ cmp(scratch1, Operand(Smi::FromInt(1))); __ b(eq, &one_char_separator); __ b(gt, &long_separator); @@ -3742,9 +3707,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset)); __ SmiUntag(string_length); - __ add(string, - string, - Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); __ CopyBytes(string, result_pos, string_length, scratch1); __ cmp(element, elements_end); __ b(lt, &empty_separator_loop); // End while (element < elements_end). @@ -3754,7 +3717,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { // One-character separator case __ bind(&one_char_separator); // Replace separator with its ASCII character value. - __ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize)); + __ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize)); // Jump into the loop after the code that copies the separator, so the first // element is not preceded by a separator __ jmp(&one_char_separator_loop_entry); @@ -3774,9 +3737,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset)); __ SmiUntag(string_length); - __ add(string, - string, - Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); __ CopyBytes(string, result_pos, string_length, scratch1); __ cmp(element, elements_end); __ b(lt, &one_char_separator_loop); // End while (element < elements_end). @@ -3797,16 +3758,14 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ SmiUntag(string_length); __ add(string, separator, - Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); __ CopyBytes(string, result_pos, string_length, scratch1); __ bind(&long_separator); __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset)); __ SmiUntag(string_length); - __ add(string, - string, - Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); __ CopyBytes(string, result_pos, string_length, scratch1); __ cmp(element, elements_end); __ b(lt, &long_separator_loop); // End while (element < elements_end). @@ -4111,8 +4070,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { // Call stub. Undo operation first. __ sub(r0, r0, Operand(Smi::FromInt(count_value))); } - __ mov(r1, r0); - __ mov(r0, Operand(Smi::FromInt(count_value))); + __ mov(r1, Operand(Smi::FromInt(count_value))); // Record position before stub call. 
SetSourcePosition(expr->position());

@@ -4337,7 +4295,29 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {

default: {
VisitForAccumulatorValue(expr->right());
- Condition cond = CompareIC::ComputeCondition(op);
+ Condition cond = eq;
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ cond = eq;
+ break;
+ case Token::LT:
+ cond = lt;
+ break;
+ case Token::GT:
+ cond = gt;
+ break;
+ case Token::LTE:
+ cond = le;
+ break;
+ case Token::GTE:
+ cond = ge;
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
__ pop(r1);
bool inline_smi_code = ShouldInlineSmiCase(op);
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 29a3687..4839589 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -1379,6 +1379,7 @@ static void KeyedStoreGenerateGenericHelper(
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,
+ receiver,
elements, // Overwritten.
r3, // Scratch regs...
r4,
@@ -1698,15 +1699,36 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}

-bool CompareIC::HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address cmp_instruction_address =
- Assembler::return_address_from_call_start(address);
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+ HandleScope scope;
+ Handle<Code> rewritten;
+ State previous_state = GetState();
+ State state = TargetState(previous_state, false, x, y);
+ if (state == GENERIC) {
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
+ rewritten = stub.GetCode();
+ } else {
+ ICCompareStub stub(op_, state);
+ if (state == KNOWN_OBJECTS) {
+ stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+ }
+ rewritten = stub.GetCode();
+ }
+ set_target(*rewritten);

- // If the instruction following the call is not a cmp rx, #yyy, nothing
- // was inlined.
- Instr instr = Assembler::instr_at(cmp_instruction_address);
- return Assembler::IsCmpImmediate(instr);
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[CompareIC (%s->%s)#%s]\n",
+ GetStateName(previous_state),
+ GetStateName(state),
+ Token::Name(op_));
+ }
+#endif
+
+ // Activate inlined smi code.
+ if (previous_state == UNINITIALIZED) { + PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK); + } } diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index 4203673..21c549f 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -177,7 +177,6 @@ const char* LArithmeticT::Mnemonic() const { case Token::BIT_AND: return "bit-and-t"; case Token::BIT_OR: return "bit-or-t"; case Token::BIT_XOR: return "bit-xor-t"; - case Token::ROR: return "ror-t"; case Token::SHL: return "shl-t"; case Token::SAR: return "sar-t"; case Token::SHR: return "shr-t"; @@ -297,11 +296,6 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) { } -void LMathExp::PrintDataTo(StringStream* stream) { - value()->PrintTo(stream); -} - - void LLoadContextSlot::PrintDataTo(StringStream* stream) { context()->PrintTo(stream); stream->Add("[%d]", slot_index()); @@ -378,27 +372,20 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) { } -void LLoadKeyed::PrintDataTo(StringStream* stream) { - elements()->PrintTo(stream); +void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); stream->Add("["); key()->PrintTo(stream); - if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d]", additional_index()); - } else { - stream->Add("]"); - } + stream->Add("] <- "); + value()->PrintTo(stream); } -void LStoreKeyed::PrintDataTo(StringStream* stream) { +void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) { elements()->PrintTo(stream); stream->Add("["); key()->PrintTo(stream); - if (hydrogen()->IsDehoisted()) { - stream->Add(" + %d] <-", additional_index()); - } else { - stream->Add("] <- "); - } + stream->Add("] <- "); value()->PrintTo(stream); } @@ -1046,15 +1033,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { LOperand* input = UseFixedDouble(instr->value(), d2); LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL); return MarkAsCall(DefineFixedDouble(result, d2), instr); - } else if (op == kMathExp) { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->value()->representation().IsDouble()); - LOperand* input = UseTempRegister(instr->value()); - LOperand* temp1 = TempRegister(); - LOperand* temp2 = TempRegister(); - LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll. - LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2); - return DefineAsRegister(result); } else if (op == kMathPowHalf) { LOperand* input = UseFixedDouble(instr->value(), d2); LOperand* temp = FixedTemp(d3); @@ -1130,11 +1108,6 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { } -LInstruction* LChunkBuilder::DoRor(HRor* instr) { - return DoShift(Token::ROR, instr); -} - - LInstruction* LChunkBuilder::DoShr(HShr* instr) { return DoShift(Token::SHR, instr); } @@ -1333,21 +1306,8 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) { return DefineAsRegister(mul); } else if (instr->representation().IsDouble()) { - if (instr->UseCount() == 1 && instr->uses().value()->IsAdd()) { - HAdd* add = HAdd::cast(instr->uses().value()); - if (instr == add->left()) { - // This mul is the lhs of an add. The add and mul will be folded - // into a multiply-add. - return NULL; - } - if (instr == add->right() && !add->left()->IsMul()) { - // This mul is the rhs of an add, where the lhs is not another mul. - // The add and mul will be folded into a multiply-add. 
- return NULL; - } - } - return DoArithmeticD(Token::MUL, instr); + } else { return DoArithmeticT(Token::MUL, instr); } @@ -1358,12 +1318,6 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) { if (instr->representation().IsInteger32()) { ASSERT(instr->left()->representation().IsInteger32()); ASSERT(instr->right()->representation().IsInteger32()); - - if (instr->left()->IsConstant()) { - // If lhs is constant, do reverse subtraction instead. - return DoRSub(instr); - } - LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseOrConstantAtStart(instr->right()); LSubI* sub = new(zone()) LSubI(left, right); @@ -1380,32 +1334,6 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) { } -LInstruction* LChunkBuilder::DoRSub(HSub* instr) { - ASSERT(instr->representation().IsInteger32()); - ASSERT(instr->left()->representation().IsInteger32()); - ASSERT(instr->right()->representation().IsInteger32()); - - // Note: The lhs of the subtraction becomes the rhs of the - // reverse-subtraction. - LOperand* left = UseRegisterAtStart(instr->right()); - LOperand* right = UseOrConstantAtStart(instr->left()); - LRSubI* rsb = new(zone()) LRSubI(left, right); - LInstruction* result = DefineAsRegister(rsb); - if (instr->CheckFlag(HValue::kCanOverflow)) { - result = AssignEnvironment(result); - } - return result; -} - - -LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) { - LOperand* multiplier_op = UseRegisterAtStart(mul->left()); - LOperand* multiplicand_op = UseRegisterAtStart(mul->right()); - LOperand* addend_op = UseRegisterAtStart(addend); - return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op, - multiplicand_op)); -} - LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { if (instr->representation().IsInteger32()) { ASSERT(instr->left()->representation().IsInteger32()); @@ -1419,14 +1347,6 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { } return result; } else if (instr->representation().IsDouble()) { - if (instr->left()->IsMul()) - return DoMultiplyAdd(HMul::cast(instr->left()), instr->right()); - - if (instr->right()->IsMul()) { - ASSERT(!instr->left()->IsMul()); - return DoMultiplyAdd(HMul::cast(instr->right()), instr->left()); - } - return DoArithmeticD(Token::ADD, instr); } else { ASSERT(instr->representation().IsTagged()); @@ -1492,7 +1412,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { LInstruction* LChunkBuilder::DoCompareIDAndBranch( HCompareIDAndBranch* instr) { - Representation r = instr->representation(); + Representation r = instr->GetInputRepresentation(); if (r.IsInteger32()) { ASSERT(instr->left()->representation().IsInteger32()); ASSERT(instr->right()->representation().IsInteger32()); @@ -1646,16 +1566,6 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) { } -LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { - LOperand* string = UseRegister(instr->string()); - LOperand* index = UseRegister(instr->index()); - LOperand* value = UseRegister(instr->value()); - LSeqStringSetChar* result = - new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value); - return DefineAsRegister(result); -} - - LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { LOperand* value = UseRegisterOrConstantAtStart(instr->index()); LOperand* length = UseRegister(instr->length()); @@ -1779,10 +1689,10 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) { - LUnallocated* 
temp1 = TempRegister(); + LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); - LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2); - return AssignEnvironment(Define(result, temp1)); + LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2); + return AssignEnvironment(result); } @@ -1950,40 +1860,53 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer( } -LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { +LInstruction* LChunkBuilder::DoLoadKeyedFastElement( + HLoadKeyedFastElement* instr) { + ASSERT(instr->representation().IsTagged()); ASSERT(instr->key()->representation().IsInteger32() || instr->key()->representation().IsTagged()); - ElementsKind elements_kind = instr->elements_kind(); + LOperand* obj = UseRegisterAtStart(instr->object()); LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LLoadKeyed* result = NULL; + LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key); + if (instr->RequiresHoleCheck()) AssignEnvironment(result); + return DefineAsRegister(result); +} - if (!instr->is_external()) { - LOperand* obj = NULL; - if (instr->representation().IsDouble()) { - obj = UseTempRegister(instr->elements()); - } else { - ASSERT(instr->representation().IsTagged()); - obj = UseRegisterAtStart(instr->elements()); - } - result = new(zone()) LLoadKeyed(obj, key); - } else { - ASSERT( - (instr->representation().IsInteger32() && - (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && - (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || - (instr->representation().IsDouble() && - ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || - (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); - LOperand* external_pointer = UseRegister(instr->elements()); - result = new(zone()) LLoadKeyed(external_pointer, key); - } - DefineAsRegister(result); +LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement( + HLoadKeyedFastDoubleElement* instr) { + ASSERT(instr->representation().IsDouble()); + ASSERT(instr->key()->representation().IsInteger32() || + instr->key()->representation().IsTagged()); + LOperand* elements = UseTempRegister(instr->elements()); + LOperand* key = UseRegisterOrConstantAtStart(instr->key()); + LLoadKeyedFastDoubleElement* result = + new(zone()) LLoadKeyedFastDoubleElement(elements, key); + return AssignEnvironment(DefineAsRegister(result)); +} + + +LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement( + HLoadKeyedSpecializedArrayElement* instr) { + ElementsKind elements_kind = instr->elements_kind(); + ASSERT( + (instr->representation().IsInteger32() && + (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && + (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || + (instr->representation().IsDouble() && + ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || + (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); + ASSERT(instr->key()->representation().IsInteger32() || + instr->key()->representation().IsTagged()); + LOperand* external_pointer = UseRegister(instr->external_pointer()); + LOperand* key = UseRegisterOrConstant(instr->key()); + LLoadKeyedSpecializedArrayElement* result = + new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key); + LInstruction* load_instr = DefineAsRegister(result); // An unsigned int array load might overflow and cause a deopt, make sure it // has an environment. - bool can_deoptimize = instr->RequiresHoleCheck() || - (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS); - return can_deoptimize ? 
AssignEnvironment(result) : result; + return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ? + AssignEnvironment(load_instr) : load_instr; } @@ -1997,48 +1920,66 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { } -LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { - ElementsKind elements_kind = instr->elements_kind(); +LInstruction* LChunkBuilder::DoStoreKeyedFastElement( + HStoreKeyedFastElement* instr) { + bool needs_write_barrier = instr->NeedsWriteBarrier(); + ASSERT(instr->value()->representation().IsTagged()); + ASSERT(instr->object()->representation().IsTagged()); + ASSERT(instr->key()->representation().IsInteger32() || + instr->key()->representation().IsTagged()); - if (!instr->is_external()) { - ASSERT(instr->elements()->representation().IsTagged()); - bool needs_write_barrier = instr->NeedsWriteBarrier(); - LOperand* object = NULL; - LOperand* key = NULL; - LOperand* val = NULL; - - if (instr->value()->representation().IsDouble()) { - object = UseRegisterAtStart(instr->elements()); - val = UseTempRegister(instr->value()); - key = UseRegisterOrConstantAtStart(instr->key()); - } else { - ASSERT(instr->value()->representation().IsTagged()); - object = UseTempRegister(instr->elements()); - val = needs_write_barrier ? UseTempRegister(instr->value()) - : UseRegisterAtStart(instr->value()); - key = needs_write_barrier ? UseTempRegister(instr->key()) - : UseRegisterOrConstantAtStart(instr->key()); - } + LOperand* obj = UseTempRegister(instr->object()); + LOperand* val = needs_write_barrier + ? UseTempRegister(instr->value()) + : UseRegisterAtStart(instr->value()); + LOperand* key = needs_write_barrier + ? UseTempRegister(instr->key()) + : UseRegisterOrConstantAtStart(instr->key()); + return new(zone()) LStoreKeyedFastElement(obj, key, val); +} - return new(zone()) LStoreKeyed(object, key, val); - } +LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement( + HStoreKeyedFastDoubleElement* instr) { + ASSERT(instr->value()->representation().IsDouble()); + ASSERT(instr->elements()->representation().IsTagged()); + ASSERT(instr->key()->representation().IsInteger32() || + instr->key()->representation().IsTagged()); + + LOperand* elements = UseRegisterAtStart(instr->elements()); + LOperand* val = UseTempRegister(instr->value()); + LOperand* key = UseRegisterOrConstantAtStart(instr->key()); + + return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val); +} + + +LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement( + HStoreKeyedSpecializedArrayElement* instr) { + ElementsKind elements_kind = instr->elements_kind(); ASSERT( (instr->value()->representation().IsInteger32() && (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || (instr->value()->representation().IsDouble() && ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || - (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); - ASSERT(instr->elements()->representation().IsExternal()); + (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); + ASSERT(instr->external_pointer()->representation().IsExternal()); + ASSERT(instr->key()->representation().IsInteger32() || + instr->key()->representation().IsTagged()); + + LOperand* external_pointer = UseRegister(instr->external_pointer()); bool val_is_temp_register = elements_kind == EXTERNAL_PIXEL_ELEMENTS || elements_kind == EXTERNAL_FLOAT_ELEMENTS; - LOperand* val = val_is_temp_register ? UseTempRegister(instr->value()) + LOperand* val = val_is_temp_register + ? 
UseTempRegister(instr->value()) : UseRegister(instr->value()); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LOperand* external_pointer = UseRegister(instr->elements()); - return new(zone()) LStoreKeyed(external_pointer, key, val); + LOperand* key = UseRegisterOrConstant(instr->key()); + + return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer, + key, + val); } @@ -2261,7 +2202,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { env->set_ast_id(instr->ast_id()); env->Drop(instr->pop_count()); - for (int i = instr->values()->length() - 1; i >= 0; --i) { + for (int i = 0; i < instr->values()->length(); ++i) { HValue* value = instr->values()->at(i); if (instr->HasAssignedIndexAt(i)) { env->Bind(instr->GetAssignedIndexAt(i), value); diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index 7397b4b..fb36fe9 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -125,18 +125,18 @@ class LCodeGen; V(LoadFunctionPrototype) \ V(LoadGlobalCell) \ V(LoadGlobalGeneric) \ - V(LoadKeyed) \ + V(LoadKeyedFastDoubleElement) \ + V(LoadKeyedFastElement) \ V(LoadKeyedGeneric) \ + V(LoadKeyedSpecializedArrayElement) \ V(LoadNamedField) \ V(LoadNamedFieldPolymorphic) \ V(LoadNamedGeneric) \ V(MapEnumLength) \ - V(MathExp) \ V(MathFloorOfDiv) \ V(MathMinMax) \ V(ModI) \ V(MulI) \ - V(MultiplyAddD) \ V(NumberTagD) \ V(NumberTagI) \ V(NumberTagU) \ @@ -150,7 +150,6 @@ class LCodeGen; V(Random) \ V(RegExpLiteral) \ V(Return) \ - V(SeqStringSetChar) \ V(ShiftI) \ V(SmiTag) \ V(SmiUntag) \ @@ -158,8 +157,10 @@ class LCodeGen; V(StoreContextSlot) \ V(StoreGlobalCell) \ V(StoreGlobalGeneric) \ - V(StoreKeyed) \ + V(StoreKeyedFastDoubleElement) \ + V(StoreKeyedFastElement) \ V(StoreKeyedGeneric) \ + V(StoreKeyedSpecializedArrayElement) \ V(StoreNamedField) \ V(StoreNamedGeneric) \ V(StringAdd) \ @@ -168,7 +169,6 @@ class LCodeGen; V(StringCompareAndBranch) \ V(StringLength) \ V(SubI) \ - V(RSubI) \ V(TaggedToI) \ V(ThisFunction) \ V(Throw) \ @@ -625,24 +625,6 @@ class LMulI: public LTemplateInstruction<1, 2, 1> { }; -// Instruction for computing multiplier * multiplicand + addend. 
-class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> { - public: - LMultiplyAddD(LOperand* addend, LOperand* multiplier, - LOperand* multiplicand) { - inputs_[0] = addend; - inputs_[1] = multiplier; - inputs_[2] = multiplicand; - } - - LOperand* addend() { return inputs_[0]; } - LOperand* multiplier() { return inputs_[1]; } - LOperand* multiplicand() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d") -}; - - class LCmpIDAndBranch: public LControlInstruction<2, 0> { public: LCmpIDAndBranch(LOperand* left, LOperand* right) { @@ -658,7 +640,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> { Token::Value op() const { return hydrogen()->token(); } bool is_double() const { - return hydrogen()->representation().IsDouble(); + return hydrogen()->GetInputRepresentation().IsDouble(); } virtual void PrintDataTo(StringStream* stream); @@ -683,30 +665,6 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> { }; -class LMathExp: public LTemplateInstruction<1, 1, 3> { - public: - LMathExp(LOperand* value, - LOperand* double_temp, - LOperand* temp1, - LOperand* temp2) { - inputs_[0] = value; - temps_[0] = temp1; - temps_[1] = temp2; - temps_[2] = double_temp; - ExternalReference::InitializeMathExpData(); - } - - LOperand* value() { return inputs_[0]; } - LOperand* temp1() { return temps_[0]; } - LOperand* temp2() { return temps_[1]; } - LOperand* double_temp() { return temps_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp") - - virtual void PrintDataTo(StringStream* stream); -}; - - class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> { public: LCmpObjectEqAndBranch(LOperand* left, LOperand* right) { @@ -1031,21 +989,6 @@ class LSubI: public LTemplateInstruction<1, 2, 0> { }; -class LRSubI: public LTemplateInstruction<1, 2, 0> { - public: - LRSubI(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; - } - - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i") - DECLARE_HYDROGEN_ACCESSOR(Sub) -}; - - class LConstantI: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i") @@ -1199,30 +1142,6 @@ class LDateField: public LTemplateInstruction<1, 1, 1> { }; -class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> { - public: - LSeqStringSetChar(String::Encoding encoding, - LOperand* string, - LOperand* index, - LOperand* value) : encoding_(encoding) { - inputs_[0] = string; - inputs_[1] = index; - inputs_[2] = value; - } - - String::Encoding encoding() { return encoding_; } - LOperand* string() { return inputs_[0]; } - LOperand* index() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char") - DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar) - - private: - String::Encoding encoding_; -}; - - class LThrow: public LTemplateInstruction<0, 1, 0> { public: explicit LThrow(LOperand* value) { @@ -1438,26 +1357,58 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> { }; -class LLoadKeyed: public LTemplateInstruction<1, 2, 0> { +class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> { public: - LLoadKeyed(LOperand* elements, LOperand* key) { + LLoadKeyedFastElement(LOperand* elements, LOperand* key) { inputs_[0] = elements; inputs_[1] = key; } LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } - ElementsKind elements_kind() const { - 
return hydrogen()->elements_kind(); + + DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element") + DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement) + + uint32_t additional_index() const { return hydrogen()->index_offset(); } +}; + + +class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> { + public: + LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) { + inputs_[0] = elements; + inputs_[1] = key; } - bool is_external() const { - return hydrogen()->is_external(); + + LOperand* elements() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement, + "load-keyed-fast-double-element") + DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement) + + uint32_t additional_index() const { return hydrogen()->index_offset(); } +}; + + +class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { + public: + LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) { + inputs_[0] = external_pointer; + inputs_[1] = key; } - DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) + LOperand* external_pointer() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } - virtual void PrintDataTo(StringStream* stream); + DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement, + "load-keyed-specialized-array-element") + DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement) + + ElementsKind elements_kind() const { + return hydrogen()->elements_kind(); + } uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1971,28 +1922,51 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> { }; -class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { +class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) { + LStoreKeyedFastElement(LOperand* object, LOperand* key, LOperand* value) { inputs_[0] = object; inputs_[1] = key; inputs_[2] = value; } - bool is_external() const { return hydrogen()->is_external(); } - LOperand* elements() { return inputs_[0]; } + LOperand* object() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } - ElementsKind elements_kind() const { - return hydrogen()->elements_kind(); + + DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, + "store-keyed-fast-element") + DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) + + virtual void PrintDataTo(StringStream* stream); + + uint32_t additional_index() const { return hydrogen()->index_offset(); } +}; + + +class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> { + public: + LStoreKeyedFastDoubleElement(LOperand* elements, + LOperand* key, + LOperand* value) { + inputs_[0] = elements; + inputs_[1] = key; + inputs_[2] = value; } - DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyed) + LOperand* elements() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement, + "store-keyed-fast-double-element") + DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement) virtual void PrintDataTo(StringStream* stream); - bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } + uint32_t additional_index() const { return hydrogen()->index_offset(); } + + bool NeedsCanonicalization() { return 
hydrogen()->NeedsCanonicalization(); }
};

@@ -2016,6 +1990,28 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};

+class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ inputs_[2] = value;
+ }
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
+ "store-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
+
+ ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+

class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
public:
@@ -2138,7 +2134,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};

-class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> {
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
public:
LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
temps_[0] = temp;
@@ -2482,9 +2478,6 @@ class LChunkBuilder BASE_EMBEDDED {
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO

- LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
- LInstruction* DoRSub(HSub* instr);
-
static bool HasMagicNumberForDivisor(int32_t divisor);
static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 06b0216..6f5aa43 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -146,20 +146,8 @@ bool LCodeGen::GeneratePrologue() {
__ bind(&ok);
}

-
- info()->set_prologue_offset(masm_->pc_offset());
- {
- PredictableCodeSizeScope predictible_code_size_scope(
- masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
- // The following three instructions must remain together and unmodified
- // for code aging to work properly.
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
- }
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP.

// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
@@ -234,30 +222,7 @@ bool LCodeGen::GenerateBody() {
}

if (emit_instructions) {
- if (FLAG_code_comments) {
- HValue* hydrogen = instr->hydrogen_value();
- if (hydrogen != NULL) {
- if (hydrogen->IsChange()) {
- HValue* changed_value = HChange::cast(hydrogen)->value();
- int use_id = 0;
- const char* use_mnemo = "dead";
- if (hydrogen->UseCount() >= 1) {
- HValue* use_value = hydrogen->uses().value();
- use_id = use_value->id();
- use_mnemo = use_value->Mnemonic();
- }
- Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
- current_instruction_, instr->Mnemonic(),
- changed_value->id(), changed_value->Mnemonic(),
- use_id, use_mnemo);
- } else {
- Comment(";;; @%d: %s. 
<#%d>", current_instruction_, - instr->Mnemonic(), hydrogen->id()); - } - } else { - Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); - } - } + Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); instr->CompileToNative(this); } } @@ -1324,18 +1289,6 @@ void LCodeGen::DoDivI(LDivI* instr) { } -void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { - DwVfpRegister addend = ToDoubleRegister(instr->addend()); - DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); - DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); - - // This is computed in-place. - ASSERT(addend.is(ToDoubleRegister(instr->result()))); - - __ vmla(addend, multiplier, multiplicand); -} - - void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) { const Register result = ToRegister(instr->result()); const Register left = ToRegister(instr->left()); @@ -1536,9 +1489,6 @@ void LCodeGen::DoShiftI(LShiftI* instr) { // Mask the right_op operand. __ and_(scratch, ToRegister(right_op), Operand(0x1F)); switch (instr->op()) { - case Token::ROR: - __ mov(result, Operand(left, ROR, scratch)); - break; case Token::SAR: __ mov(result, Operand(left, ASR, scratch)); break; @@ -1562,13 +1512,6 @@ void LCodeGen::DoShiftI(LShiftI* instr) { int value = ToInteger32(LConstantOperand::cast(right_op)); uint8_t shift_count = static_cast(value & 0x1F); switch (instr->op()) { - case Token::ROR: - if (shift_count != 0) { - __ mov(result, Operand(left, ROR, shift_count)); - } else { - __ Move(result, left); - } - break; case Token::SAR: if (shift_count != 0) { __ mov(result, Operand(left, ASR, shift_count)); @@ -1623,27 +1566,6 @@ void LCodeGen::DoSubI(LSubI* instr) { } -void LCodeGen::DoRSubI(LRSubI* instr) { - LOperand* left = instr->left(); - LOperand* right = instr->right(); - LOperand* result = instr->result(); - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - SBit set_cond = can_overflow ? SetCC : LeaveCC; - - if (right->IsStackSlot() || right->IsArgument()) { - Register right_reg = EmitLoadRegister(right, ip); - __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); - } else { - ASSERT(right->IsRegister() || right->IsConstantOperand()); - __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); - } - - if (can_overflow) { - DeoptimizeIf(vs, instr->environment()); - } -} - - void LCodeGen::DoConstantI(LConstantI* instr) { ASSERT(instr->result()->IsRegister()); __ mov(ToRegister(instr->result()), Operand(instr->value())); @@ -1764,15 +1686,6 @@ void LCodeGen::DoDateField(LDateField* instr) { } -void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { - SeqStringSetCharGenerator::Generate(masm(), - instr->encoding(), - ToRegister(instr->string()), - ToRegister(instr->index()), - ToRegister(instr->value())); -} - - void LCodeGen::DoBitNotI(LBitNotI* instr) { Register input = ToRegister(instr->value()); Register result = ToRegister(instr->result()); @@ -2560,7 +2473,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { // We use Factory::the_hole_value() on purpose instead of loading from the // root array to force relocation to be able to later patch with // the cached map. 
- PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize); + PredictableCodeSizeScope predictable(masm_); Handle<JSGlobalPropertyCell> cell = factory()->NewJSGlobalPropertyCell(factory()->the_hole_value()); __ mov(ip, Operand(Handle<Object>(cell))); @@ -2624,7 +2537,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, static const int kAdditionalDelta = 5; // Make sure that code size is predictable, since we use specific constant offsets in the code to find embedded values. - PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize); + PredictableCodeSizeScope predictable(masm_); int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta; Label before_push_delta; __ bind(&before_push_delta); @@ -3005,87 +2918,50 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { } -void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { - Register external_pointer = ToRegister(instr->elements()); - Register key = no_reg; - ElementsKind elements_kind = instr->elements_kind(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort("array index constant value too big."); - } +void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { + Register elements = ToRegister(instr->elements()); + Register result = ToRegister(instr->result()); + Register scratch = scratch0(); + Register store_base = scratch; + int offset = 0; + + if (instr->key()->IsConstantOperand()) { + LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); + offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + + instr->additional_index()); + store_base = elements; } else { - key = ToRegister(instr->key()); + Register key = EmitLoadRegister(instr->key(), scratch0()); + // Even though the HLoadKeyedFastElement instruction forces the input + // representation for the key to be an integer, the input gets replaced + // during bound check elimination with the index argument to the bounds + // check, which can be tagged, so that case must be handled here, too. + if (instr->hydrogen()->key()->representation().IsTagged()) { + __ add(scratch, elements, + Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); + } else { + __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); + } + offset = FixedArray::OffsetOfElementAt(instr->additional_index()); } - int element_size_shift = ElementsKindToShiftSize(elements_kind); - int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) - ? (element_size_shift - kSmiTagSize) : element_size_shift; - int additional_offset = instr->additional_index() << element_size_shift; + __ ldr(result, FieldMemOperand(store_base, offset)); - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || - elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - CpuFeatures::Scope scope(VFP3); - DwVfpRegister result = ToDoubleRegister(instr->result()); - Operand operand = key_is_constant - ? Operand(constant_key << element_size_shift) - : Operand(key, LSL, shift_size); - __ add(scratch0(), external_pointer, operand); - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - __ vldr(result.low(), scratch0(), additional_offset); - __ vcvt_f64_f32(result, result.low()); - } else { // i.e.
elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ vldr(result, scratch0(), additional_offset); - } - } else { - Register result = ToRegister(instr->result()); - MemOperand mem_operand = PrepareKeyedOperand( - key, external_pointer, key_is_constant, constant_key, - element_size_shift, shift_size, - instr->additional_index(), additional_offset); - switch (elements_kind) { - case EXTERNAL_BYTE_ELEMENTS: - __ ldrsb(result, mem_operand); - break; - case EXTERNAL_PIXEL_ELEMENTS: - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ ldrb(result, mem_operand); - break; - case EXTERNAL_SHORT_ELEMENTS: - __ ldrsh(result, mem_operand); - break; - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ ldrh(result, mem_operand); - break; - case EXTERNAL_INT_ELEMENTS: - __ ldr(result, mem_operand); - break; - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ ldr(result, mem_operand); - if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { - __ cmp(result, Operand(0x80000000)); - DeoptimizeIf(cs, instr->environment()); - } - break; - case EXTERNAL_FLOAT_ELEMENTS: - case EXTERNAL_DOUBLE_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case DICTIONARY_ELEMENTS: - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; + // Check for the hole value. + if (instr->hydrogen()->RequiresHoleCheck()) { + if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { + __ tst(result, Operand(kSmiTagMask)); + DeoptimizeIf(ne, instr->environment()); + } else { + __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); + __ cmp(result, scratch); + DeoptimizeIf(eq, instr->environment()); } } } -void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { +void LCodeGen::DoLoadKeyedFastDoubleElement( + LLoadKeyedFastDoubleElement* instr) { Register elements = ToRegister(instr->elements()); bool key_is_constant = instr->key()->IsConstantOperand(); Register key = no_reg; @@ -3117,65 +2993,13 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { (instr->additional_index() << element_size_shift))); } - __ vldr(result, elements, 0); if (instr->hydrogen()->RequiresHoleCheck()) { __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); __ cmp(scratch, Operand(kHoleNanUpper32)); DeoptimizeIf(eq, instr->environment()); } -} - -void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { - Register elements = ToRegister(instr->elements()); - Register result = ToRegister(instr->result()); - Register scratch = scratch0(); - Register store_base = scratch; - int offset = 0; - - if (instr->key()->IsConstantOperand()) { - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + - instr->additional_index()); - store_base = elements; - } else { - Register key = EmitLoadRegister(instr->key(), scratch0()); - // Even though the HLoadKeyed instruction forces the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. 
- if (instr->hydrogen()->key()->representation().IsTagged()) { - __ add(scratch, elements, - Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); - } else { - __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); - } - offset = FixedArray::OffsetOfElementAt(instr->additional_index()); - } - __ ldr(result, FieldMemOperand(store_base, offset)); - - // Check for the hole value. - if (instr->hydrogen()->RequiresHoleCheck()) { - if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { - __ tst(result, Operand(kSmiTagMask)); - DeoptimizeIf(ne, instr->environment()); - } else { - __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); - __ cmp(result, scratch); - DeoptimizeIf(eq, instr->environment()); - } - } -} - - -void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { - if (instr->is_external()) { - DoLoadKeyedExternalArray(instr); - } else if (instr->hydrogen()->representation().IsDouble()) { - DoLoadKeyedFixedDoubleArray(instr); - } else { - DoLoadKeyedFixedArray(instr); - } + __ vldr(result, elements, 0); } @@ -3215,6 +3039,87 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key, } +void LCodeGen::DoLoadKeyedSpecializedArrayElement( + LLoadKeyedSpecializedArrayElement* instr) { + Register external_pointer = ToRegister(instr->external_pointer()); + Register key = no_reg; + ElementsKind elements_kind = instr->elements_kind(); + bool key_is_constant = instr->key()->IsConstantOperand(); + int constant_key = 0; + if (key_is_constant) { + constant_key = ToInteger32(LConstantOperand::cast(instr->key())); + if (constant_key & 0xF0000000) { + Abort("array index constant value too big."); + } + } else { + key = ToRegister(instr->key()); + } + int element_size_shift = ElementsKindToShiftSize(elements_kind); + int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) + ? (element_size_shift - kSmiTagSize) : element_size_shift; + int additional_offset = instr->additional_index() << element_size_shift; + + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || + elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { + CpuFeatures::Scope scope(VFP3); + DwVfpRegister result = ToDoubleRegister(instr->result()); + Operand operand = key_is_constant + ? Operand(constant_key << element_size_shift) + : Operand(key, LSL, shift_size); + __ add(scratch0(), external_pointer, operand); + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { + __ vldr(result.low(), scratch0(), additional_offset); + __ vcvt_f64_f32(result, result.low()); + } else { // i.e. 
elements_kind == EXTERNAL_DOUBLE_ELEMENTS + __ vldr(result, scratch0(), additional_offset); + } + } else { + Register result = ToRegister(instr->result()); + MemOperand mem_operand = PrepareKeyedOperand( + key, external_pointer, key_is_constant, constant_key, + element_size_shift, shift_size, + instr->additional_index(), additional_offset); + switch (elements_kind) { + case EXTERNAL_BYTE_ELEMENTS: + __ ldrsb(result, mem_operand); + break; + case EXTERNAL_PIXEL_ELEMENTS: + case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + __ ldrb(result, mem_operand); + break; + case EXTERNAL_SHORT_ELEMENTS: + __ ldrsh(result, mem_operand); + break; + case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + __ ldrh(result, mem_operand); + break; + case EXTERNAL_INT_ELEMENTS: + __ ldr(result, mem_operand); + break; + case EXTERNAL_UNSIGNED_INT_ELEMENTS: + __ ldr(result, mem_operand); + if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { + __ cmp(result, Operand(0x80000000)); + DeoptimizeIf(cs, instr->environment()); + } + break; + case EXTERNAL_FLOAT_ELEMENTS: + case EXTERNAL_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_DOUBLE_ELEMENTS: + case FAST_ELEMENTS: + case FAST_SMI_ELEMENTS: + case DICTIONARY_ELEMENTS: + case NON_STRICT_ARGUMENTS_ELEMENTS: + UNREACHABLE(); + break; + } + } +} + + void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { ASSERT(ToRegister(instr->object()).is(r1)); ASSERT(ToRegister(instr->key()).is(r0)); @@ -3820,20 +3725,6 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) { } -void LCodeGen::DoMathExp(LMathExp* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); - DoubleRegister double_scratch2 = double_scratch0(); - Register temp1 = ToRegister(instr->temp1()); - Register temp2 = ToRegister(instr->temp2()); - - MathExpGenerator::EmitMathExp( - masm(), input, result, double_scratch1, double_scratch2, - temp1, temp2, scratch0()); -} - - void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { ASSERT(ToDoubleRegister(instr->result()).is(d2)); TranscendentalCacheStub stub(TranscendentalCache::LOG, @@ -4109,8 +4000,102 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { } -void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { - Register external_pointer = ToRegister(instr->elements()); +void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { + Register value = ToRegister(instr->value()); + Register elements = ToRegister(instr->object()); + Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; + Register scratch = scratch0(); + Register store_base = scratch; + int offset = 0; + + // Do the store. + if (instr->key()->IsConstantOperand()) { + ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); + LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); + offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + + instr->additional_index()); + store_base = elements; + } else { + // Even though the HLoadKeyedFastElement instruction forces the input + // representation for the key to be an integer, the input gets replaced + // during bound check elimination with the index argument to the bounds + // check, which can be tagged, so that case must be handled here, too. 
+ if (instr->hydrogen()->key()->representation().IsTagged()) { + __ add(scratch, elements, + Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); + } else { + __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); + } + offset = FixedArray::OffsetOfElementAt(instr->additional_index()); + } + __ str(value, FieldMemOperand(store_base, offset)); + + if (instr->hydrogen()->NeedsWriteBarrier()) { + HType type = instr->hydrogen()->value()->type(); + SmiCheck check_needed = + type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; + // Compute address of modified element and store it into key register. + __ add(key, store_base, Operand(offset - kHeapObjectTag)); + __ RecordWrite(elements, + key, + value, + kLRHasBeenSaved, + kSaveFPRegs, + EMIT_REMEMBERED_SET, + check_needed); + } +} + + +void LCodeGen::DoStoreKeyedFastDoubleElement( + LStoreKeyedFastDoubleElement* instr) { + DwVfpRegister value = ToDoubleRegister(instr->value()); + Register elements = ToRegister(instr->elements()); + Register key = no_reg; + Register scratch = scratch0(); + bool key_is_constant = instr->key()->IsConstantOperand(); + int constant_key = 0; + + // Calculate the effective address of the slot in the array to store the + // double value. + if (key_is_constant) { + constant_key = ToInteger32(LConstantOperand::cast(instr->key())); + if (constant_key & 0xF0000000) { + Abort("array index constant value too big."); + } + } else { + key = ToRegister(instr->key()); + } + int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); + int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) + ? (element_size_shift - kSmiTagSize) : element_size_shift; + Operand operand = key_is_constant + ? Operand((constant_key << element_size_shift) + + FixedDoubleArray::kHeaderSize - kHeapObjectTag) + : Operand(key, LSL, shift_size); + __ add(scratch, elements, operand); + if (!key_is_constant) { + __ add(scratch, scratch, + Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + } + + if (instr->NeedsCanonicalization()) { + // Check for NaN. All NaNs must be canonicalized. + __ VFPCompareAndSetFlags(value, value); + // Only load canonical NaN if the comparison above set the overflow. + __ Vmov(value, + FixedDoubleArray::canonical_not_the_hole_nan_as_double(), + no_reg, vs); + } + + __ vstr(value, scratch, instr->additional_index() << element_size_shift); +} + + +void LCodeGen::DoStoreKeyedSpecializedArrayElement( + LStoreKeyedSpecializedArrayElement* instr) { + + Register external_pointer = ToRegister(instr->external_pointer()); Register key = no_reg; ElementsKind elements_kind = instr->elements_kind(); bool key_is_constant = instr->key()->IsConstantOperand(); @@ -4179,110 +4164,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { } -void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { - DwVfpRegister value = ToDoubleRegister(instr->value()); - Register elements = ToRegister(instr->elements()); - Register key = no_reg; - Register scratch = scratch0(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - - // Calculate the effective address of the slot in the array to store the - // double value. 
- if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort("array index constant value too big."); - } - } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) - ? (element_size_shift - kSmiTagSize) : element_size_shift; - Operand operand = key_is_constant - ? Operand((constant_key << element_size_shift) + - FixedDoubleArray::kHeaderSize - kHeapObjectTag) - : Operand(key, LSL, shift_size); - __ add(scratch, elements, operand); - if (!key_is_constant) { - __ add(scratch, scratch, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); - } - - if (instr->NeedsCanonicalization()) { - // Check for NaN. All NaNs must be canonicalized. - __ VFPCompareAndSetFlags(value, value); - // Only load canonical NaN if the comparison above set the overflow. - __ Vmov(value, - FixedDoubleArray::canonical_not_the_hole_nan_as_double(), - no_reg, vs); - } - - __ vstr(value, scratch, instr->additional_index() << element_size_shift); -} - - -void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { - Register value = ToRegister(instr->value()); - Register elements = ToRegister(instr->elements()); - Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) - : no_reg; - Register scratch = scratch0(); - Register store_base = scratch; - int offset = 0; - - // Do the store. - if (instr->key()->IsConstantOperand()) { - ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + - instr->additional_index()); - store_base = elements; - } else { - // Even though the HLoadKeyed instruction forces the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. - if (instr->hydrogen()->key()->representation().IsTagged()) { - __ add(scratch, elements, - Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); - } else { - __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); - } - offset = FixedArray::OffsetOfElementAt(instr->additional_index()); - } - __ str(value, FieldMemOperand(store_base, offset)); - - if (instr->hydrogen()->NeedsWriteBarrier()) { - HType type = instr->hydrogen()->value()->type(); - SmiCheck check_needed = - type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - // Compute address of modified element and store it into key register. 
- __ add(key, store_base, Operand(offset - kHeapObjectTag)); - __ RecordWrite(elements, - key, - value, - kLRHasBeenSaved, - kSaveFPRegs, - EMIT_REMEMBERED_SET, - check_needed); - } -} - - -void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { - // By cases: external, fast double - if (instr->is_external()) { - DoStoreKeyedExternalArray(instr); - } else if (instr->hydrogen()->value()->representation().IsDouble()) { - DoStoreKeyedFixedDoubleArray(instr); - } else { - DoStoreKeyedFixedArray(instr); - } -} - - void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { ASSERT(ToRegister(instr->object()).is(r2)); ASSERT(ToRegister(instr->key()).is(r1)); @@ -4740,6 +4621,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { if (instr->truncating()) { Register scratch3 = ToRegister(instr->temp2()); + SwVfpRegister single_scratch = double_scratch.low(); ASSERT(!scratch3.is(input_reg) && !scratch3.is(scratch1) && !scratch3.is(scratch2)); @@ -4761,7 +4643,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ EmitECMATruncate(input_reg, double_scratch2, - double_scratch, + single_scratch, scratch1, scratch2, scratch3); @@ -4843,19 +4725,20 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { Register scratch1 = scratch0(); Register scratch2 = ToRegister(instr->temp()); DwVfpRegister double_input = ToDoubleRegister(instr->value()); - DwVfpRegister double_scratch = double_scratch0(); Label done; if (instr->truncating()) { Register scratch3 = ToRegister(instr->temp2()); + SwVfpRegister single_scratch = double_scratch0().low(); __ EmitECMATruncate(result_reg, double_input, - double_scratch, + single_scratch, scratch1, scratch2, scratch3); } else { + DwVfpRegister double_scratch = double_scratch0(); __ EmitVFPTruncate(kRoundToMinusInf, result_reg, double_input, @@ -5029,7 +4912,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { - ASSERT(instr->temp()->Equals(instr->result())); Register temp1 = ToRegister(instr->temp()); Register temp2 = ToRegister(instr->temp2()); @@ -5054,6 +4936,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { DoCheckMapCommon(temp1, temp2, Handle(current_prototype->map()), ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); + DeoptimizeIf(ne, instr->environment()); } @@ -5664,7 +5547,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { __ cmp(sp, Operand(ip)); __ b(hs, &done); StackCheckStub stub; - PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize); + PredictableCodeSizeScope predictable(masm_); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); EnsureSpaceForLazyDeopt(); __ bind(&done); diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index 921285b..9281537 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -377,12 +377,6 @@ class LCodeGen BASE_EMBEDDED { }; void EnsureSpaceForLazyDeopt(); - void DoLoadKeyedExternalArray(LLoadKeyed* instr); - void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); - void DoLoadKeyedFixedArray(LLoadKeyed* instr); - void DoStoreKeyedExternalArray(LStoreKeyed* instr); - void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr); - void DoStoreKeyedFixedArray(LStoreKeyed* instr); Zone* zone_; LPlatformChunk* const chunk_; diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index dc1dc1d..623bd6a 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ 
-422,17 +422,6 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src, void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index, Condition cond) { - if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) && - !Heap::RootCanBeWrittenAfterInitialization(index) && - !predictable_code_size()) { - Handle root(isolate()->heap()->roots_array_start()[index]); - if (!isolate()->heap()->InNewSpace(*root)) { - // The CPU supports fast immediate values, and this root will never - // change. We will load it as a relocatable immediate value. - mov(destination, Operand(root), LeaveCC, cond); - return; - } - } ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond); } @@ -1787,10 +1776,10 @@ void MacroAssembler::AllocateAsciiString(Register result, Label* gc_required) { // Calculate the number of bytes needed for the characters in the string while // observing object alignment. - ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); + ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); ASSERT(kCharSize == 1); add(scratch1, length, - Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize)); + Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize)); and_(scratch1, scratch1, Operand(~kObjectAlignmentMask)); // Allocate ASCII string in new space. @@ -1956,13 +1945,13 @@ void MacroAssembler::CheckFastSmiElements(Register map, void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, Register key_reg, + Register receiver_reg, Register elements_reg, Register scratch1, Register scratch2, Register scratch3, Register scratch4, - Label* fail, - int elements_offset) { + Label* fail) { Label smi_value, maybe_nan, have_double_value, is_nan, done; Register mantissa_reg = scratch2; Register exponent_reg = scratch3; @@ -1989,10 +1978,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, bind(&have_double_value); add(scratch1, elements_reg, Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); - str(mantissa_reg, FieldMemOperand( - scratch1, FixedDoubleArray::kHeaderSize - elements_offset)); - uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset + - sizeof(kHoleNanLower32); + str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize)); + uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); str(exponent_reg, FieldMemOperand(scratch1, offset)); jmp(&done); @@ -2013,8 +2000,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, bind(&smi_value); add(scratch1, elements_reg, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag - - elements_offset)); + Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); add(scratch1, scratch1, Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); // scratch1 is now effective address of the double element @@ -2223,28 +2209,12 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, add(r6, r6, Operand(1)); str(r6, MemOperand(r7, kLevelOffset)); - if (FLAG_log_timer_events) { - FrameScope frame(this, StackFrame::MANUAL); - PushSafepointRegisters(); - PrepareCallCFunction(0, r0); - CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0); - PopSafepointRegisters(); - } - // Native call returns to the DirectCEntry stub which redirects to the // return address pushed on stack (could have moved after GC). // DirectCEntry stub itself is generated early and never moves. 
DirectCEntryStub stub; stub.GenerateCall(this, function); - if (FLAG_log_timer_events) { - FrameScope frame(this, StackFrame::MANUAL); - PushSafepointRegisters(); - PrepareCallCFunction(0, r0); - CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0); - PopSafepointRegisters(); - } - Label promote_scheduled_exception; Label delete_allocated_handles; Label leave_exit_frame; @@ -2490,20 +2460,6 @@ void MacroAssembler::ConvertToInt32(Register source, } -void MacroAssembler::TryFastDoubleToInt32(Register result, - DwVfpRegister double_input, - DwVfpRegister double_scratch, - Label* done) { - ASSERT(!double_input.is(double_scratch)); - - vcvt_s32_f64(double_scratch.low(), double_input); - vmov(result, double_scratch.low()); - vcvt_f64_s32(double_scratch, double_scratch.low()); - VFPCompareAndSetFlags(double_input, double_scratch); - b(eq, done); -} - - void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode, Register result, DwVfpRegister double_input, @@ -2519,7 +2475,11 @@ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode, Label done; // Test for values that can be exactly represented as a signed 32-bit integer. - TryFastDoubleToInt32(result, double_input, double_scratch, &done); + vcvt_s32_f64(double_scratch.low(), double_input); + vmov(result, double_scratch.low()); + vcvt_f64_s32(double_scratch, double_scratch.low()); + VFPCompareAndSetFlags(double_input, double_scratch); + b(eq, &done); // Convert to integer, respecting rounding mode. int32_t check_inexact_conversion = @@ -2636,7 +2596,7 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result, void MacroAssembler::EmitECMATruncate(Register result, DwVfpRegister double_input, - DwVfpRegister double_scratch, + SwVfpRegister single_scratch, Register scratch, Register input_high, Register input_low) { @@ -2647,18 +2607,16 @@ void MacroAssembler::EmitECMATruncate(Register result, ASSERT(!scratch.is(result) && !scratch.is(input_high) && !scratch.is(input_low)); - ASSERT(!double_input.is(double_scratch)); + ASSERT(!single_scratch.is(double_input.low()) && + !single_scratch.is(double_input.high())); Label done; - // Test for values that can be exactly represented as a signed 32-bit integer. - TryFastDoubleToInt32(result, double_input, double_scratch, &done); - // Clear cumulative exception flags. ClearFPSCRBits(kVFPExceptionMask, scratch); // Try a conversion to a signed integer. - vcvt_s32_f64(double_scratch.low(), double_input); - vmov(result, double_scratch.low()); + vcvt_s32_f64(single_scratch, double_input); + vmov(result, single_scratch); // Retrieve the FPSCR. vmrs(scratch); // Check for overflow and NaNs.
@@ -3370,10 +3328,8 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( Register scratch2, Label* failure) { int kFlatAsciiStringMask = - kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask | - kStringRepresentationMask; + kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; int kFlatAsciiStringTag = ASCII_STRING_TYPE; - ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask); and_(scratch1, first, Operand(kFlatAsciiStringMask)); and_(scratch2, second, Operand(kFlatAsciiStringMask)); cmp(scratch1, Operand(kFlatAsciiStringTag)); @@ -3387,10 +3343,8 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, Register scratch, Label* failure) { int kFlatAsciiStringMask = - kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask | - kStringRepresentationMask; + kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; int kFlatAsciiStringTag = ASCII_STRING_TYPE; - ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask); and_(scratch, type, Operand(kFlatAsciiStringMask)); cmp(scratch, Operand(kFlatAsciiStringTag)); b(ne, failure); @@ -3730,7 +3684,7 @@ void MacroAssembler::EnsureNotWhite( // For ASCII (char-size of 1) we shift the smi tag away to get the length. // For UC16 (char-size of 2) we just leave the smi tag in place, thereby // getting the length multiplied by 2. - ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4); + ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4); ASSERT(kSmiTag == 0 && kSmiTagSize == 1); ldr(ip, FieldMemOperand(value, String::kLengthOffset)); tst(instance_type, Operand(kStringEncodingMask)); diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 15cef16..e3e39a3 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -322,7 +322,6 @@ class MacroAssembler: public Assembler { // Push a handle. void Push(Handle handle); - void Push(Smi* smi) { Push(Handle(smi)); } // Push two registers. Pushes leftmost register first (to highest address). void Push(Register src1, Register src2, Condition cond = al) { @@ -832,14 +831,14 @@ class MacroAssembler: public Assembler { // case scratch2, scratch3 and scratch4 are unmodified. void StoreNumberToDoubleElements(Register value_reg, Register key_reg, + Register receiver_reg, // All regs below here overwritten. Register elements_reg, Register scratch1, Register scratch2, Register scratch3, Register scratch4, - Label* fail, - int elements_offset = 0); + Label* fail); // Compare an object's map with the specified map and its transitioned // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are @@ -894,15 +893,12 @@ class MacroAssembler: public Assembler { // Load and check the instance type of an object for being a string. // Loads the type into the second argument register. - // Returns a condition that will be enabled if the object was a string - // and the passed-in condition passed. If the passed-in condition failed - // then flags remain unchanged. + // Returns a condition that will be enabled if the object was a string. 
Condition IsObjectStringType(Register obj, - Register type, - Condition cond = al) { - ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond); - ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond); - tst(type, Operand(kIsNotStringMask), cond); + Register type) { + ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset)); + ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset)); + tst(type, Operand(kIsNotStringMask)); ASSERT_EQ(0, kStringTag); return eq; } @@ -959,14 +955,6 @@ class MacroAssembler: public Assembler { DwVfpRegister double_scratch, Label *not_int32); - // Try to convert a double to a signed 32-bit integer. If the double value - // can be exactly represented as an integer, the code jumps to 'done' and - // 'result' contains the integer value. Otherwise, the code falls through. - void TryFastDoubleToInt32(Register result, - DwVfpRegister double_input, - DwVfpRegister double_scratch, - Label* done); - // Truncates a double using a specific rounding mode, and writes the value // to the result register. // Clears the z flag (ne condition) if an overflow occurs. @@ -997,7 +985,7 @@ class MacroAssembler: public Assembler { // Exits with 'result' holding the answer and all other registers clobbered. void EmitECMATruncate(Register result, DwVfpRegister double_input, - DwVfpRegister double_scratch, + SwVfpRegister single_scratch, Register scratch, Register scratch2, Register scratch3); @@ -1214,7 +1202,7 @@ class MacroAssembler: public Assembler { // Source and destination can be the same register. void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case); - // Jump if the register contains a smi. + // Jump the register contains a smi. inline void JumpIfSmi(Register value, Label* smi_label) { tst(value, Operand(kSmiTagMask)); b(eq, smi_label); diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index d852d23..17b8677 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -1150,7 +1150,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address, Handle<String> subject(frame_entry<String*>(re_frame, kInputString)); // Current string. - bool is_ascii = subject->IsOneByteRepresentationUnderneath(); + bool is_ascii = subject->IsAsciiRepresentationUnderneath(); ASSERT(re_code->instruction_start() <= *return_address); ASSERT(*return_address <= re_code->instruction_start() + re_code->instruction_size()); @@ -1181,7 +1181,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address, } // String might have changed. - if (subject_tmp->IsOneByteRepresentation() != is_ascii) { + if (subject_tmp->IsAsciiRepresentation() != is_ascii) { // If we changed between an ASCII and an UC16 string, the specialized // code cannot be used, and we need to restart regexp matching from // scratch (including, potentially, compiling a new version of the code).
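[Editor's note] The simulator hunks that follow revert the ARM simulator's ROR (rotate right) shift handling back to UNIMPLEMENTED(). For readers auditing what the revert drops, this is a minimal self-contained sketch of the removed rotate-right semantics; the function name and signature are illustrative, not V8 API.

#include <cstdint>

// Rotate-right of a 32-bit value, mirroring the logic the hunks remove.
// Assumes 1 <= shift_amount <= 31 in the general case; ROR #0 is
// special-cased because the ARM encoding keeps the previous carry flag.
uint32_t RotateRight32(uint32_t value, int shift_amount,
                       bool* carry_out, bool previous_carry) {
  if (shift_amount == 0) {
    *carry_out = previous_carry;  // ROR #0: value and carry unchanged.
    return value;
  }
  uint32_t left = value >> shift_amount;          // bits sliding down
  uint32_t right = value << (32 - shift_amount);  // bits wrapping around
  uint32_t result = right | left;
  *carry_out = (result >> 31) != 0;  // carry is the last bit rotated out
  return result;
}

The masking of shift_amount into the 0..31 range happens in the callers (DoShiftI masks with 0x1F), which is why the wrap-around shift above never hits undefined behavior.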
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index d11e340..5b8ba2a 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -1387,14 +1387,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) { } case ROR: { - if (shift_amount == 0) { - *carry_out = c_flag_; - } else { - uint32_t left = static_cast(result) >> shift_amount; - uint32_t right = static_cast(result) << (32 - shift_amount); - result = right | left; - *carry_out = (static_cast(result) >> 31) != 0; - } + UNIMPLEMENTED(); break; } @@ -1466,14 +1459,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) { } case ROR: { - if (shift_amount == 0) { - *carry_out = c_flag_; - } else { - uint32_t left = static_cast(result) >> shift_amount; - uint32_t right = static_cast(result) << (32 - shift_amount); - result = right | left; - *carry_out = (static_cast(result) >> 31) != 0; - } + UNIMPLEMENTED(); break; } @@ -2778,20 +2764,6 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { double dm_value = get_double_from_d_register(vm); double dd_value = dn_value * dm_value; set_d_register_from_double(vd, dd_value); - } else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) { - // vmla - if (instr->SzValue() != 0x1) { - UNREACHABLE(); // Not used by V8. - } - - double dd_value = get_double_from_d_register(vd); - double dn_value = get_double_from_d_register(vn); - double dm_value = get_double_from_d_register(vm); - - // Note: we do the mul and add in separate steps to avoid getting a result - // with too high precision. - set_d_register_from_double(vd, dn_value * dm_value); - set_d_register_from_double(vd, get_double_from_d_register(vd) + dd_value); } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) { // vdiv if (instr->SzValue() != 0x1) { @@ -3301,7 +3273,33 @@ void Simulator::Execute() { } -void Simulator::CallInternal(byte* entry) { +int32_t Simulator::Call(byte* entry, int argument_count, ...) { + va_list parameters; + va_start(parameters, argument_count); + // Set up arguments + + // First four arguments passed in registers. + ASSERT(argument_count >= 4); + set_register(r0, va_arg(parameters, int32_t)); + set_register(r1, va_arg(parameters, int32_t)); + set_register(r2, va_arg(parameters, int32_t)); + set_register(r3, va_arg(parameters, int32_t)); + + // Remaining arguments passed on stack. + int original_stack = get_register(sp); + // Compute position of stack on entry to generated code. + int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)); + if (OS::ActivationFrameAlignment() != 0) { + entry_stack &= -OS::ActivationFrameAlignment(); + } + // Store remaining arguments on stack, from low to high memory. + intptr_t* stack_argument = reinterpret_cast(entry_stack); + for (int i = 4; i < argument_count; i++) { + stack_argument[i - 4] = va_arg(parameters, int32_t); + } + va_end(parameters); + set_register(sp, entry_stack); + // Prepare to execute the code at entry set_register(pc, reinterpret_cast(entry)); // Put down marker for end of simulation. The simulator will stop simulation @@ -3355,37 +3353,6 @@ void Simulator::CallInternal(byte* entry) { set_register(r9, r9_val); set_register(r10, r10_val); set_register(r11, r11_val); -} - - -int32_t Simulator::Call(byte* entry, int argument_count, ...) { - va_list parameters; - va_start(parameters, argument_count); - // Set up arguments - - // First four arguments passed in registers. 
- ASSERT(argument_count >= 4); - set_register(r0, va_arg(parameters, int32_t)); - set_register(r1, va_arg(parameters, int32_t)); - set_register(r2, va_arg(parameters, int32_t)); - set_register(r3, va_arg(parameters, int32_t)); - - // Remaining arguments passed on stack. - int original_stack = get_register(sp); - // Compute position of stack on entry to generated code. - int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)); - if (OS::ActivationFrameAlignment() != 0) { - entry_stack &= -OS::ActivationFrameAlignment(); - } - // Store remaining arguments on stack, from low to high memory. - intptr_t* stack_argument = reinterpret_cast(entry_stack); - for (int i = 4; i < argument_count; i++) { - stack_argument[i - 4] = va_arg(parameters, int32_t); - } - va_end(parameters); - set_register(sp, entry_stack); - - CallInternal(entry); // Pop stack passed arguments. CHECK_EQ(entry_stack, get_register(sp)); @@ -3396,27 +3363,6 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) { } -double Simulator::CallFP(byte* entry, double d0, double d1) { - if (use_eabi_hardfloat()) { - set_d_register_from_double(0, d0); - set_d_register_from_double(1, d1); - } else { - int buffer[2]; - ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0)); - memcpy(buffer, &d0, sizeof(d0)); - set_dw_register(0, buffer); - memcpy(buffer, &d1, sizeof(d1)); - set_dw_register(2, buffer); - } - CallInternal(entry); - if (use_eabi_hardfloat()) { - return get_double_from_d_register(0); - } else { - return get_double_from_register_pair(0); - } -} - - uintptr_t Simulator::PushAddress(uintptr_t address) { int new_sp = get_register(sp) - sizeof(uintptr_t); uintptr_t* stack_slot = reinterpret_cast(new_sp); diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h index ec47fa1..abc91bb 100644 --- a/deps/v8/src/arm/simulator-arm.h +++ b/deps/v8/src/arm/simulator-arm.h @@ -205,8 +205,6 @@ class Simulator { // generated RegExp code with 7 parameters. This is a convenience function, // which sets up the simulator state and grabs the result on return. int32_t Call(byte* entry, int argument_count, ...); - // Alternative: call a 2-argument double function. - double CallFP(byte* entry, double d0, double d1); // Push an address onto the JS stack. uintptr_t PushAddress(uintptr_t address); @@ -358,8 +356,6 @@ class Simulator { template void SetVFPRegister(int reg_index, const InputType& value); - void CallInternal(byte* entry); - // Architecture state. // Saturating instructions require a Q flag to indicate saturation. // There is currently no way to read the CPSR directly, and thus read the Q diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index a194dfa..d3b5862 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -327,23 +327,18 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, Register dst, Register src, Handle holder, - PropertyIndex index) { - if (index.is_header_index()) { - int offset = index.header_index() * kPointerSize; + int index) { + // Adjust for the number of properties stored in the holder. + index -= holder->map()->inobject_properties(); + if (index < 0) { + // Get the property straight out of the holder. + int offset = holder->map()->instance_size() + (index * kPointerSize); __ ldr(dst, FieldMemOperand(src, offset)); } else { - // Adjust for the number of properties stored in the holder. 
- int slot = index.field_index() - holder->map()->inobject_properties(); - if (slot < 0) { - // Get the property straight out of the holder. - int offset = holder->map()->instance_size() + (slot * kPointerSize); - __ ldr(dst, FieldMemOperand(src, offset)); - } else { - // Calculate the offset into the properties array. - int offset = slot * kPointerSize + FixedArray::kHeaderSize; - __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset)); - __ ldr(dst, FieldMemOperand(dst, offset)); - } + // Calculate the offset into the properties array. + int offset = index * kPointerSize + FixedArray::kHeaderSize; + __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset)); + __ ldr(dst, FieldMemOperand(dst, offset)); } } @@ -1201,7 +1196,7 @@ void StubCompiler::GenerateLoadField(Handle object, Register scratch1, Register scratch2, Register scratch3, - PropertyIndex index, + int index, Handle name, Label* miss) { // Check that the receiver isn't a smi. @@ -1550,7 +1545,7 @@ void CallStubCompiler::GenerateMissBranch() { Handle CallStubCompiler::CompileCallField(Handle object, Handle holder, - PropertyIndex index, + int index, Handle name) { // ----------- S t a t e ------------- // -- r2 : name @@ -1623,7 +1618,7 @@ Handle CallStubCompiler::CompileArrayPushCall( Label call_builtin; if (argc == 1) { // Otherwise fall through to call the builtin. - Label attempt_to_grow_elements, with_write_barrier, check_double; + Label attempt_to_grow_elements; Register elements = r6; Register end_elements = r5; @@ -1634,9 +1629,10 @@ Handle CallStubCompiler::CompileArrayPushCall( __ CheckMap(elements, r0, Heap::kFixedArrayMapRootIndex, - &check_double, + &call_builtin, DONT_DO_SMI_CHECK); + // Get the array's length into r0 and calculate new length. __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); STATIC_ASSERT(kSmiTagSize == 1); @@ -1651,6 +1647,7 @@ Handle CallStubCompiler::CompileArrayPushCall( __ b(gt, &attempt_to_grow_elements); // Check if value is a smi. + Label with_write_barrier; __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); __ JumpIfNotSmi(r4, &with_write_barrier); @@ -1670,40 +1667,6 @@ Handle CallStubCompiler::CompileArrayPushCall( __ Drop(argc + 1); __ Ret(); - __ bind(&check_double); - - // Check that the elements are in fast mode and writable. - __ CheckMap(elements, - r0, - Heap::kFixedDoubleArrayMapRootIndex, - &call_builtin, - DONT_DO_SMI_CHECK); - - // Get the array's length into r0 and calculate new length. - __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); - __ add(r0, r0, Operand(Smi::FromInt(argc))); - - // Get the elements' length. - __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset)); - - // Check if we could survive without allocation. - __ cmp(r0, r4); - __ b(gt, &call_builtin); - - __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); - __ StoreNumberToDoubleElements( - r4, r0, elements, r3, r5, r2, r9, - &call_builtin, argc * kDoubleSize); - - // Save new length. - __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); - - // Check for a smi. - __ Drop(argc + 1); - __ Ret(); - __ bind(&with_write_barrier); __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset)); @@ -1715,11 +1678,6 @@ Handle CallStubCompiler::CompileArrayPushCall( // In case of fast smi-only, convert to fast object, otherwise bail out. 
__ bind(¬_fast_object); __ CheckFastSmiElements(r3, r7, &call_builtin); - - __ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); - __ cmp(r7, ip); - __ b(eq, &call_builtin); // edx: receiver // r3: map Label try_holey_map; @@ -2954,7 +2912,7 @@ Handle LoadStubCompiler::CompileLoadNonexistent(Handle name, Handle LoadStubCompiler::CompileLoadField(Handle object, Handle holder, - PropertyIndex index, + int index, Handle name) { // ----------- S t a t e ------------- // -- r0 : receiver @@ -3143,7 +3101,7 @@ Handle LoadStubCompiler::CompileLoadGlobal( Handle KeyedLoadStubCompiler::CompileLoadField(Handle name, Handle receiver, Handle holder, - PropertyIndex index) { + int index) { // ----------- S t a t e ------------- // -- lr : return address // -- r0 : key @@ -3509,13 +3467,7 @@ Handle ConstructStubCompiler::CompileConstructStub( // r1: constructor function // r2: initial map // r7: undefined - ASSERT(function->has_initial_map()); __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); -#ifdef DEBUG - int instance_size = function->initial_map()->instance_size(); - __ cmp(r3, Operand(instance_size >> kPointerSizeLog2)); - __ Check(eq, "Instance size of initial map changed."); -#endif __ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS); // Allocated the JSObject, now initialize the fields. Map is set to initial @@ -3573,6 +3525,7 @@ Handle ConstructStubCompiler::CompileConstructStub( } // Fill the unused in-object property fields with undefined. + ASSERT(function->has_initial_map()); for (int i = shared->this_property_assignments_count(); i < function->initial_map()->inobject_properties(); i++) { @@ -3856,20 +3809,20 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT); // Now we can use r0 for the result as key is not needed any more. __ mov(r0, r5); - Register dst_mantissa = r1; - Register dst_exponent = r3; + Register dst1 = r1; + Register dst2 = r3; FloatingPointHelper::Destination dest = FloatingPointHelper::kCoreRegisters; FloatingPointHelper::ConvertIntToDouble(masm, value, dest, d0, - dst_mantissa, - dst_exponent, + dst1, + dst2, r9, s0); - __ str(dst_mantissa, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); - __ str(dst_exponent, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + __ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); + __ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); __ Ret(); } } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) { @@ -4138,7 +4091,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( } FloatingPointHelper::ConvertIntToDouble( masm, r5, destination, - d0, r6, r7, // These are: double_dst, dst_mantissa, dst_exponent. + d0, r6, r7, // These are: double_dst, dst1, dst2. r4, s2); // These are: scratch2, single_scratch. if (destination == FloatingPointHelper::kVFPRegisters) { CpuFeatures::Scope scope(VFP2); @@ -4197,7 +4150,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // not include -kHeapObjectTag into it. 
__ sub(r5, value, Operand(kHeapObjectTag)); __ vldr(d0, r5, HeapNumber::kValueOffset); - __ EmitECMATruncate(r5, d0, d1, r6, r7, r9); + __ EmitECMATruncate(r5, d0, s2, r6, r7, r9); switch (elements_kind) { case EXTERNAL_BYTE_ELEMENTS: @@ -4690,12 +4643,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // -- r1 : key // -- r2 : receiver // -- lr : return address - // -- r3 : scratch (elements backing store) + // -- r3 : scratch // -- r4 : scratch // -- r5 : scratch - // -- r6 : scratch - // -- r7 : scratch - // -- r9 : scratch // ----------------------------------- Label miss_force_generic, transition_elements_kind, grow, slow; Label finish_store, check_capacity; @@ -4708,7 +4658,6 @@ Register scratch2 = r5; Register scratch3 = r6; Register scratch4 = r7; - Register scratch5 = r9; Register length_reg = r7; // This stub is meant to be tail-jumped to; the receiver must already @@ -4739,6 +4688,7 @@ __ bind(&finish_store); __ StoreNumberToDoubleElements(value_reg, key_reg, + receiver_reg, // All registers after this are overwritten. elements_reg, scratch1, @@ -4787,7 +4737,8 @@ __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow, TAG_OBJECT); - // Initialize the new FixedDoubleArray. + // Initialize the new FixedDoubleArray. Leave elements uninitialized for + // efficiency; they are guaranteed to be initialized before use. __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex); __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset)); __ mov(scratch1, @@ -4795,25 +4746,6 @@ __ str(scratch1, FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset)); - __ mov(scratch1, elements_reg); - __ StoreNumberToDoubleElements(value_reg, - key_reg, - // All registers after this are overwritten. - scratch1, - scratch2, - scratch3, - scratch4, - scratch5, - &transition_elements_kind); - - __ mov(scratch1, Operand(kHoleNanLower32)); - __ mov(scratch2, Operand(kHoleNanUpper32)); - for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) { - int offset = FixedDoubleArray::OffsetOfElementAt(i); - __ str(scratch1, FieldMemOperand(elements_reg, offset)); - __ str(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize)); - } - // Install the new backing store in the JSArray. __ str(elements_reg, FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); @@ -4826,7 +4758,7 @@ __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); __ ldr(elements_reg, FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); - __ Ret(); + __ jmp(&finish_store); __ bind(&check_capacity); // Make sure that the backing store can hold additional elements.
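[Editor's note] The GenerateStoreFastDoubleElement and NeedsCanonicalization hunks above all enforce one invariant: a double stored into a FixedDoubleArray must have its NaNs canonicalized, because the array encodes the hole as one specific NaN bit pattern (kHoleNanLower32/kHoleNanUpper32), and an arbitrary user NaN stored verbatim could alias it. A minimal sketch of that invariant, with illustrative bit patterns rather than V8's actual constants:

#include <cmath>
#include <cstdint>
#include <cstring>

// Illustrative values only: the hole is one specific quiet NaN, and every
// user-visible NaN is rewritten to a different canonical NaN so that no
// stored double can ever be bit-equal to the hole.
const uint64_t kIllustrativeHoleNaN      = 0x7FF7FFFFFFFFFFFFULL;
const uint64_t kIllustrativeCanonicalNaN = 0x7FF8000000000000ULL;

double CanonicalizeForDoubleArray(double value) {
  // Mirrors VFPCompareAndSetFlags(value, value) in the hunk above: the
  // compare is unordered (the vs condition) exactly when value is NaN.
  if (std::isnan(value)) {
    double canonical;
    std::memcpy(&canonical, &kIllustrativeCanonicalNaN, sizeof canonical);
    return canonical;
  }
  return value;  // non-NaN values are stored as-is
}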
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index 47f796d..250c30c 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -413,7 +413,6 @@ function ArrayJoin(separator) { ["Array.prototype.join"]); } - var length = TO_UINT32(this.length); if (IS_UNDEFINED(separator)) { separator = ','; } else if (!IS_STRING(separator)) { @@ -423,7 +422,7 @@ function ArrayJoin(separator) { var result = %_FastAsciiArrayJoin(this, separator); if (!IS_UNDEFINED(result)) return result; - return Join(this, length, separator, ConvertToString); + return Join(this, TO_UINT32(this.length), separator, ConvertToString); } @@ -442,8 +441,8 @@ function ArrayPop() { } n--; var value = this[n]; - delete this[n]; this.length = n; + delete this[n]; return value; } @@ -582,7 +581,7 @@ function ArrayShift() { var first = this[0]; - if (IS_ARRAY(this) && !%IsObserved(this)) { + if (IS_ARRAY(this)) { SmartMove(this, 0, 1, len, 0); } else { SimpleMove(this, 0, 1, len, 0); @@ -603,7 +602,7 @@ function ArrayUnshift(arg1) { // length == 1 var len = TO_UINT32(this.length); var num_arguments = %_ArgumentsLength(); - if (IS_ARRAY(this) && !%IsObserved(this)) { + if (IS_ARRAY(this)) { SmartMove(this, 0, 0, len, num_arguments); } else { SimpleMove(this, 0, 0, len, num_arguments); @@ -650,7 +649,6 @@ function ArraySlice(start, end) { if (end_i < start_i) return result; if (IS_ARRAY(this) && - !%IsObserved(this) && (end_i > 1000) && (%EstimateNumberOfElements(this) < end_i)) { SmartSlice(this, start_i, end_i - start_i, len, result); @@ -707,9 +705,7 @@ function ArraySplice(start, delete_count) { var use_simple_splice = true; - if (IS_ARRAY(this) && - !%IsObserved(this) && - num_additional_args !== del_count) { + if (IS_ARRAY(this) && num_additional_args !== del_count) { // If we are only deleting/moving a few things near the end of the // array then the simple version is going to be faster, because it // doesn't touch most of the array. @@ -1553,11 +1549,9 @@ function SetUpArray() { // exposed to user code. // Adding only the functions that are actually used. SetUpLockedPrototype(InternalArray, $Array(), $Array( - "indexOf", getFunction("indexOf", ArrayIndexOf), "join", getFunction("join", ArrayJoin), "pop", getFunction("pop", ArrayPop), - "push", getFunction("push", ArrayPush), - "splice", getFunction("splice", ArraySplice) + "push", getFunction("push", ArrayPush) )); } diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index 25157be..d81d4ae 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -103,78 +103,15 @@ static DoubleConstant double_constants; const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING"; -static bool math_exp_data_initialized = false; -static Mutex* math_exp_data_mutex = NULL; -static double* math_exp_constants_array = NULL; -static double* math_exp_log_table_array = NULL; - // ----------------------------------------------------------------------------- // Implementation of AssemblerBase -AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size) +AssemblerBase::AssemblerBase(Isolate* isolate) : isolate_(isolate), - jit_cookie_(0), - emit_debug_code_(FLAG_debug_code), - predictable_code_size_(false) { + jit_cookie_(0) { if (FLAG_mask_constants_with_cookie && isolate != NULL) { jit_cookie_ = V8::RandomPrivate(isolate); } - - if (buffer == NULL) { - // Do our own buffer management. 
- if (buffer_size <= kMinimalBufferSize) { - buffer_size = kMinimalBufferSize; - if (isolate->assembler_spare_buffer() != NULL) { - buffer = isolate->assembler_spare_buffer(); - isolate->set_assembler_spare_buffer(NULL); - } - } - if (buffer == NULL) buffer = NewArray(buffer_size); - own_buffer_ = true; - } else { - // Use externally provided buffer instead. - ASSERT(buffer_size > 0); - own_buffer_ = false; - } - buffer_ = static_cast(buffer); - buffer_size_ = buffer_size; - - pc_ = buffer_; -} - - -AssemblerBase::~AssemblerBase() { - if (own_buffer_) { - if (isolate() != NULL && - isolate()->assembler_spare_buffer() == NULL && - buffer_size_ == kMinimalBufferSize) { - isolate()->set_assembler_spare_buffer(buffer_); - } else { - DeleteArray(buffer_); - } - } -} - - -// ----------------------------------------------------------------------------- -// Implementation of PredictableCodeSizeScope - -PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler, - int expected_size) - : assembler_(assembler), - expected_size_(expected_size), - start_offset_(assembler->pc_offset()), - old_value_(assembler->predictable_code_size()) { - assembler_->set_predictable_code_size(true); -} - - -PredictableCodeSizeScope::~PredictableCodeSizeScope() { - // TODO(svenpanne) Remove the 'if' when everything works. - if (expected_size_ >= 0) { - CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_); - } - assembler_->set_predictable_code_size(old_value_); } @@ -376,7 +313,6 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) { #ifdef DEBUG byte* begin_pos = pos_; #endif - ASSERT(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES); ASSERT(rinfo->pc() - last_pc_ >= 0); ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM <= kMaxStandardNonCompactModes); @@ -634,15 +570,6 @@ void RelocIterator::next() { } } } - if (code_age_sequence_ != NULL) { - byte* old_code_age_sequence = code_age_sequence_; - code_age_sequence_ = NULL; - if (SetMode(RelocInfo::CODE_AGE_SEQUENCE)) { - rinfo_.data_ = 0; - rinfo_.pc_ = old_code_age_sequence; - return; - } - } done_ = true; } @@ -658,12 +585,6 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) { mode_mask_ = mode_mask; last_id_ = 0; last_position_ = 0; - byte* sequence = code->FindCodeAgeSequence(); - if (sequence != NULL && !Code::IsYoungSequence(sequence)) { - code_age_sequence_ = sequence; - } else { - code_age_sequence_ = NULL; - } if (mode_mask_ == 0) pos_ = end_; next(); } @@ -679,7 +600,6 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) { mode_mask_ = mode_mask; last_id_ = 0; last_position_ = 0; - code_age_sequence_ = NULL; if (mode_mask_ == 0) pos_ = end_; next(); } @@ -732,8 +652,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) { UNREACHABLE(); #endif return "debug break slot"; - case RelocInfo::CODE_AGE_SEQUENCE: - return "code_age_sequence"; case RelocInfo::NUMBER_OF_MODES: UNREACHABLE(); return "number_of_modes"; @@ -821,9 +739,6 @@ void RelocInfo::Verify() { case NUMBER_OF_MODES: UNREACHABLE(); break; - case CODE_AGE_SEQUENCE: - ASSERT(Code::IsYoungSequence(pc_) || code_age_stub()->IsCode()); - break; } } #endif // VERIFY_HEAP @@ -841,70 +756,6 @@ void ExternalReference::SetUp() { double_constants.canonical_non_hole_nan = OS::nan_value(); double_constants.the_hole_nan = BitCast(kHoleNanInt64); double_constants.negative_infinity = -V8_INFINITY; - - math_exp_data_mutex = OS::CreateMutex(); -} - - -void ExternalReference::InitializeMathExpData() { - // Early return? 
- if (math_exp_data_initialized) return; - - math_exp_data_mutex->Lock(); - if (!math_exp_data_initialized) { - // If this is changed, generated code must be adapted too. - const int kTableSizeBits = 11; - const int kTableSize = 1 << kTableSizeBits; - const double kTableSizeDouble = static_cast<double>(kTableSize); - - math_exp_constants_array = new double[9]; - // Input values smaller than this always return 0. - math_exp_constants_array[0] = -708.39641853226408; - // Input values larger than this always return +Infinity. - math_exp_constants_array[1] = 709.78271289338397; - math_exp_constants_array[2] = V8_INFINITY; - // The rest is black magic. Do not attempt to understand it. It is - // loosely based on the "expd" function published at: - // http://herumi.blogspot.com/2011/08/fast-double-precision-exponential.html - const double constant3 = (1 << kTableSizeBits) / log(2.0); - math_exp_constants_array[3] = constant3; - math_exp_constants_array[4] = - static_cast<double>(static_cast<int64_t>(3) << 51); - math_exp_constants_array[5] = 1 / constant3; - math_exp_constants_array[6] = 3.0000000027955394; - math_exp_constants_array[7] = 0.16666666685227835; - math_exp_constants_array[8] = 1; - - math_exp_log_table_array = new double[kTableSize]; - for (int i = 0; i < kTableSize; i++) { - double value = pow(2, i / kTableSizeDouble); - - uint64_t bits = BitCast<uint64_t>(value); - bits &= (static_cast<uint64_t>(1) << 52) - 1; - double mantissa = BitCast<double>(bits); - - // - uint64_t doublebits; - memcpy(&doublebits, &value, sizeof doublebits); - doublebits &= (static_cast<uint64_t>(1) << 52) - 1; - double mantissa2; - memcpy(&mantissa2, &doublebits, sizeof mantissa2); - CHECK_EQ(mantissa, mantissa2); - // - - math_exp_log_table_array[i] = mantissa; - } - - math_exp_data_initialized = true; - } - math_exp_data_mutex->Unlock(); -} - - -void ExternalReference::TearDownMathExpData() { - delete[] math_exp_constants_array; - delete[] math_exp_log_table_array; - delete math_exp_data_mutex; } @@ -1023,13 +874,6 @@ ExternalReference ExternalReference::get_date_field_function( } - -ExternalReference ExternalReference::get_make_code_young_function( - Isolate* isolate) { - return ExternalReference(Redirect( - isolate, FUNCTION_ADDR(Code::MakeCodeAgeSequenceYoung))); -} - - ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) { return ExternalReference(isolate->date_cache()->stamp_address()); } @@ -1056,20 +900,6 @@ ExternalReference ExternalReference::compute_output_frames_function( } - -ExternalReference ExternalReference::log_enter_external_function( - Isolate* isolate) { - return ExternalReference( - Redirect(isolate, FUNCTION_ADDR(Logger::EnterExternal))); -} - - -ExternalReference ExternalReference::log_leave_external_function( - Isolate* isolate) { - return ExternalReference( - Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal))); -} - - ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) { return ExternalReference(isolate->keyed_lookup_cache()->keys_address()); } @@ -1356,19 +1186,6 @@ ExternalReference ExternalReference::math_log_double_function( } - -ExternalReference ExternalReference::math_exp_constants(int constant_index) { - ASSERT(math_exp_data_initialized); - return ExternalReference( - reinterpret_cast<Address>(math_exp_constants_array + constant_index)); -} - - -ExternalReference ExternalReference::math_exp_log_table() { - ASSERT(math_exp_data_initialized); - return ExternalReference(reinterpret_cast<Address>(math_exp_log_table_array)); -} - - ExternalReference ExternalReference::page_flags(Page* page) { return
ExternalReference(reinterpret_cast<Address>
(page) + MemoryChunk::kFlagsOffset); diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index 4639374..a0e55cc 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -56,56 +56,18 @@ struct StatsCounter; class AssemblerBase: public Malloced { public: - AssemblerBase(Isolate* isolate, void* buffer, int buffer_size); - virtual ~AssemblerBase(); + explicit AssemblerBase(Isolate* isolate); Isolate* isolate() const { return isolate_; } - int jit_cookie() const { return jit_cookie_; } - - bool emit_debug_code() const { return emit_debug_code_; } - void set_emit_debug_code(bool value) { emit_debug_code_ = value; } - - bool predictable_code_size() const { return predictable_code_size_; } - void set_predictable_code_size(bool value) { predictable_code_size_ = value; } + int jit_cookie() { return jit_cookie_; } // Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for // cross-snapshotting. static void QuietNaN(HeapObject* nan) { } - int pc_offset() const { return static_cast(pc_ - buffer_); } - - static const int kMinimalBufferSize = 4*KB; - - protected: - // The buffer into which code and relocation info are generated. It could - // either be owned by the assembler or be provided externally. - byte* buffer_; - int buffer_size_; - bool own_buffer_; - - // The program counter, which points into the buffer above and moves forward. - byte* pc_; - private: Isolate* isolate_; int jit_cookie_; - bool emit_debug_code_; - bool predictable_code_size_; -}; - - -// Avoids using instructions that vary in size in unpredictable ways between the -// snapshot and the running VM. -class PredictableCodeSizeScope { - public: - PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size); - ~PredictableCodeSizeScope(); - - private: - AssemblerBase* assembler_; - int expected_size_; - int start_offset_; - bool old_value_; }; @@ -249,12 +211,6 @@ class RelocInfo BASE_EMBEDDED { // Pseudo-types NUMBER_OF_MODES, // There are at most 15 modes with noncompact encoding. NONE, // never recorded - CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explictly by - // code aging. - FIRST_REAL_RELOC_MODE = CODE_TARGET, - LAST_REAL_RELOC_MODE = CONST_POOL, - FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE, - LAST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE, LAST_CODE_ENUM = DEBUG_BREAK, LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL, // Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding. 
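
The PredictableCodeSizeScope deleted here (declaration above, definition in the assembler.cc hunk earlier) is a small RAII guard: it records the assembler's pc offset on construction and checks on destruction that exactly the promised number of bytes was emitted. A minimal sketch of the same pattern, using a stand-in Assembler rather than V8's real API:

    #include <cassert>
    #include <vector>

    struct Assembler {  // stand-in; V8's AssemblerBase tracks pc_ into buffer_
      std::vector<unsigned char> buffer;
      int pc_offset() const { return static_cast<int>(buffer.size()); }
      void emit(unsigned char b) { buffer.push_back(b); }
    };

    class PredictableCodeSizeScope {
     public:
      PredictableCodeSizeScope(Assembler* assembler, int expected_size)
          : assembler_(assembler),
            expected_size_(expected_size),
            start_offset_(assembler->pc_offset()) {}
      ~PredictableCodeSizeScope() {
        // Mirrors the CHECK_EQ in the removed destructor: the emitted size
        // must match the expectation, so snapshot and runtime code agree.
        assert(assembler_->pc_offset() - start_offset_ == expected_size_);
      }
     private:
      Assembler* assembler_;
      int expected_size_;
      int start_offset_;
    };

    int main() {
      Assembler masm;
      {
        PredictableCodeSizeScope scope(&masm, 2);
        masm.emit(0x90);
        masm.emit(0x90);
      }  // destructor verifies exactly two bytes were emitted
      return 0;
    }
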
@@ -269,15 +225,6 @@ class RelocInfo BASE_EMBEDDED { : pc_(pc), rmode_(rmode), data_(data), host_(host) { } - static inline bool IsRealRelocMode(Mode mode) { - return mode >= FIRST_REAL_RELOC_MODE && - mode <= LAST_REAL_RELOC_MODE; - } - static inline bool IsPseudoRelocMode(Mode mode) { - ASSERT(!IsRealRelocMode(mode)); - return mode >= FIRST_PSEUDO_RELOC_MODE && - mode <= LAST_PSEUDO_RELOC_MODE; - } static inline bool IsConstructCall(Mode mode) { return mode == CONSTRUCT_CALL; } @@ -315,9 +262,6 @@ class RelocInfo BASE_EMBEDDED { static inline bool IsDebugBreakSlot(Mode mode) { return mode == DEBUG_BREAK_SLOT; } - static inline bool IsCodeAgeSequence(Mode mode) { - return mode == CODE_AGE_SEQUENCE; - } static inline int ModeMask(Mode mode) { return 1 << mode; } // Accessors @@ -350,8 +294,7 @@ class RelocInfo BASE_EMBEDDED { INLINE(Handle target_cell_handle()); INLINE(void set_target_cell(JSGlobalPropertyCell* cell, WriteBarrierMode mode = UPDATE_WRITE_BARRIER)); - INLINE(Code* code_age_stub()); - INLINE(void set_code_age_stub(Code* stub)); + // Read the address of the word containing the target_address in an // instruction stream. What this means exactly is architecture-independent. @@ -544,7 +487,6 @@ class RelocIterator: public Malloced { byte* pos_; byte* end_; - byte* code_age_sequence_; RelocInfo rinfo_; bool done_; int mode_mask_; @@ -604,8 +546,6 @@ class ExternalReference BASE_EMBEDDED { }; static void SetUp(); - static void InitializeMathExpData(); - static void TearDownMathExpData(); typedef void* ExternalReferenceRedirector(void* original, Type type); @@ -655,16 +595,10 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference get_date_field_function(Isolate* isolate); static ExternalReference date_cache_stamp(Isolate* isolate); - static ExternalReference get_make_code_young_function(Isolate* isolate); - // Deoptimization support. static ExternalReference new_deoptimizer_function(Isolate* isolate); static ExternalReference compute_output_frames_function(Isolate* isolate); - // Log support. - static ExternalReference log_enter_external_function(Isolate* isolate); - static ExternalReference log_leave_external_function(Isolate* isolate); - // Static data in the keyed lookup cache. static ExternalReference keyed_lookup_cache_keys(Isolate* isolate); static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate); @@ -731,9 +665,6 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference math_tan_double_function(Isolate* isolate); static ExternalReference math_log_double_function(Isolate* isolate); - static ExternalReference math_exp_constants(int constant_index); - static ExternalReference math_exp_log_table(); - static ExternalReference page_flags(Page* page); Address address() const {return reinterpret_cast
(address_);} diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index 232cb73..52990b8 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -103,7 +103,6 @@ VariableProxy::VariableProxy(Isolate* isolate, void VariableProxy::BindTo(Variable* var) { ASSERT(var_ == NULL); // must be bound only once ASSERT(var != NULL); // must bind - ASSERT(!FLAG_harmony_modules || interface_->IsUnified(var->interface())); ASSERT((is_this() && var->is_this()) || name_.is_identical_to(var->name())); // Ideally CONST-ness should match. However, this is very hard to achieve // because we don't know the exact semantics of conflicting (const and @@ -477,7 +476,6 @@ void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle, void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) { TypeInfo info = oracle->SwitchType(this); - if (info.IsUninitialized()) info = TypeInfo::Unknown(); if (info.IsSmi()) { compare_type_ = SMI_ONLY; } else if (info.IsSymbol()) { @@ -606,6 +604,18 @@ void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) { } +void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) { + TypeInfo info = oracle->CompareType(this); + if (info.IsSmi()) { + compare_type_ = SMI_ONLY; + } else if (info.IsNonPrimitive()) { + compare_type_ = OBJECT_ONLY; + } else { + ASSERT(compare_type_ == NONE); + } +} + + void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) { receiver_type_ = oracle->ObjectLiteralStoreIsMonomorphic(this) ? oracle->GetObjectLiteralStoreMap(this) @@ -1060,14 +1070,16 @@ REGULAR_NODE(CallNew) // LOOKUP variables only result from constructs that cannot be inlined anyway. REGULAR_NODE(VariableProxy) -// We currently do not optimize any modules. +// We currently do not optimize any modules. Note in particular, that module +// instance objects associated with ModuleLiterals are allocated during +// scope resolution, and references to them are embedded into the code. +// That code may hence neither be cached nor re-compiled. DONT_OPTIMIZE_NODE(ModuleDeclaration) DONT_OPTIMIZE_NODE(ImportDeclaration) DONT_OPTIMIZE_NODE(ExportDeclaration) DONT_OPTIMIZE_NODE(ModuleVariable) DONT_OPTIMIZE_NODE(ModulePath) DONT_OPTIMIZE_NODE(ModuleUrl) -DONT_OPTIMIZE_NODE(ModuleStatement) DONT_OPTIMIZE_NODE(WithStatement) DONT_OPTIMIZE_NODE(TryCatchStatement) DONT_OPTIMIZE_NODE(TryFinallyStatement) diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index d299f19..802ac65 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -75,7 +75,6 @@ namespace internal { #define STATEMENT_NODE_LIST(V) \ V(Block) \ - V(ModuleStatement) \ V(ExpressionStatement) \ V(EmptyStatement) \ V(IfStatement) \ @@ -523,7 +522,7 @@ class ModuleDeclaration: public Declaration { ModuleDeclaration(VariableProxy* proxy, Module* module, Scope* scope) - : Declaration(proxy, MODULE, scope), + : Declaration(proxy, LET, scope), module_(module) { } @@ -646,25 +645,6 @@ class ModuleUrl: public Module { }; -class ModuleStatement: public Statement { - public: - DECLARE_NODE_TYPE(ModuleStatement) - - VariableProxy* proxy() const { return proxy_; } - Block* body() const { return body_; } - - protected: - ModuleStatement(VariableProxy* proxy, Block* body) - : proxy_(proxy), - body_(body) { - } - - private: - VariableProxy* proxy_; - Block* body_; -}; - - class IterationStatement: public BreakableStatement { public: // Type testing & conversion. 
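
The RecordTypeFeedback reinstated for CompareOperation (ast.cc hunk above) folds the oracle's answer into a three-state enum; the matching declarations appear in the ast.h hunk below. A self-contained sketch of that shape, with a plain struct standing in for the TypeFeedbackOracle:

    #include <cassert>

    enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };

    struct TypeInfo {  // stand-in for the oracle's CompareType() result
      bool is_smi;
      bool is_non_primitive;
    };

    struct CompareOperation {
      CompareTypeFeedback compare_type_ = NONE;

      void RecordTypeFeedback(const TypeInfo& info) {
        if (info.is_smi) {
          compare_type_ = SMI_ONLY;
        } else if (info.is_non_primitive) {
          compare_type_ = OBJECT_ONLY;
        } else {
          assert(compare_type_ == NONE);  // anything else stays generic
        }
      }
      bool IsSmiCompare() const { return compare_type_ == SMI_ONLY; }
      bool IsObjectCompare() const { return compare_type_ == OBJECT_ONLY; }
    };

    int main() {
      CompareOperation cmp;
      cmp.RecordTypeFeedback(TypeInfo{true, false});
      assert(cmp.IsSmiCompare() && !cmp.IsObjectCompare());
      return 0;
    }
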
@@ -1437,7 +1417,7 @@ class VariableProxy: public Expression { void MarkAsTrivial() { is_trivial_ = true; } void MarkAsLValue() { is_lvalue_ = true; } - // Bind this proxy to the variable var. Interfaces must match. + // Bind this proxy to the variable var. void BindTo(Variable* var); protected: @@ -1797,6 +1777,9 @@ class CompareOperation: public Expression { // Type feedback information. TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); } + void RecordTypeFeedback(TypeFeedbackOracle* oracle); + bool IsSmiCompare() { return compare_type_ == SMI_ONLY; } + bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; } // Match special cases. bool IsLiteralCompareTypeof(Expression** expr, Handle* check); @@ -1813,7 +1796,8 @@ class CompareOperation: public Expression { op_(op), left_(left), right_(right), - pos_(pos) { + pos_(pos), + compare_type_(NONE) { ASSERT(Token::IsCompareOp(op)); } @@ -1822,6 +1806,9 @@ class CompareOperation: public Expression { Expression* left_; Expression* right_; int pos_; + + enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY }; + CompareTypeFeedback compare_type_; }; @@ -2660,11 +2647,6 @@ class AstNodeFactory BASE_EMBEDDED { STATEMENT_WITH_LABELS(SwitchStatement) #undef STATEMENT_WITH_LABELS - ModuleStatement* NewModuleStatement(VariableProxy* proxy, Block* body) { - ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body); - VISIT_AND_RETURN(ModuleStatement, stmt) - } - ExpressionStatement* NewExpressionStatement(Expression* expression) { ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression); VISIT_AND_RETURN(ExpressionStatement, stmt) diff --git a/deps/v8/src/atomicops.h b/deps/v8/src/atomicops.h index da33b29..1f0c44a 100644 --- a/deps/v8/src/atomicops.h +++ b/deps/v8/src/atomicops.h @@ -151,9 +151,7 @@ Atomic64 Release_Load(volatile const Atomic64* ptr); } } // namespace v8::internal // Include our platform specific implementation. -#if defined(THREAD_SANITIZER) -#include "atomicops_internals_tsan.h" -#elif defined(_MSC_VER) && \ +#if defined(_MSC_VER) && \ (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)) #include "atomicops_internals_x86_msvc.h" #elif defined(__APPLE__) && \ diff --git a/deps/v8/src/atomicops_internals_tsan.h b/deps/v8/src/atomicops_internals_tsan.h deleted file mode 100644 index 6559336..0000000 --- a/deps/v8/src/atomicops_internals_tsan.h +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -// This file is an internal atomic implementation for compiler-based -// ThreadSanitizer. Use base/atomicops.h instead. - -#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_ -#define V8_ATOMICOPS_INTERNALS_TSAN_H_ - -// This struct is not part of the public API of this module; clients may not -// use it. (However, it's exported via BASE_EXPORT because clients implicitly -// do use it at link time by inlining these functions.) -// Features of this x86. Values may not be correct before main() is run, -// but are set conservatively. -struct AtomicOps_x86CPUFeatureStruct { - bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence - // after acquire compare-and-swap. - bool has_sse2; // Processor has SSE2. -}; -extern struct AtomicOps_x86CPUFeatureStruct - AtomicOps_Internalx86CPUFeatures; - -#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") - -namespace v8 { -namespace internal { - -#ifndef TSAN_INTERFACE_ATOMIC_H -#define TSAN_INTERFACE_ATOMIC_H - -#ifdef __cplusplus -extern "C" { -#endif - -typedef char __tsan_atomic8; -typedef short __tsan_atomic16; // NOLINT -typedef int __tsan_atomic32; -typedef long __tsan_atomic64; // NOLINT - -typedef enum { - __tsan_memory_order_relaxed = (1 << 0) + 100500, - __tsan_memory_order_consume = (1 << 1) + 100500, - __tsan_memory_order_acquire = (1 << 2) + 100500, - __tsan_memory_order_release = (1 << 3) + 100500, - __tsan_memory_order_acq_rel = (1 << 4) + 100500, - __tsan_memory_order_seq_cst = (1 << 5) + 100500, -} __tsan_memory_order; - -__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a, - __tsan_memory_order mo); -__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a, - __tsan_memory_order mo); -__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a, - __tsan_memory_order mo); -__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a, - __tsan_memory_order mo); - -void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v, - __tsan_memory_order mo); -void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v, - __tsan_memory_order mo); -void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v, - __tsan_memory_order mo); -void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v, - __tsan_memory_order mo); - -__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a, - __tsan_atomic8 v, __tsan_memory_order mo); -__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a, - __tsan_atomic16 v, __tsan_memory_order mo); -__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a, - __tsan_atomic32 v, __tsan_memory_order mo); -__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a, - __tsan_atomic64 v, __tsan_memory_order mo); - -__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a, - __tsan_atomic8 v, __tsan_memory_order mo); -__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a, - 
__tsan_atomic16 v, __tsan_memory_order mo); -__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a, - __tsan_atomic32 v, __tsan_memory_order mo); -__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a, - __tsan_atomic64 v, __tsan_memory_order mo); - -__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a, - __tsan_atomic8 v, __tsan_memory_order mo); -__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a, - __tsan_atomic16 v, __tsan_memory_order mo); -__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a, - __tsan_atomic32 v, __tsan_memory_order mo); -__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a, - __tsan_atomic64 v, __tsan_memory_order mo); - -__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a, - __tsan_atomic8 v, __tsan_memory_order mo); -__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a, - __tsan_atomic16 v, __tsan_memory_order mo); -__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a, - __tsan_atomic32 v, __tsan_memory_order mo); -__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a, - __tsan_atomic64 v, __tsan_memory_order mo); - -__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a, - __tsan_atomic8 v, __tsan_memory_order mo); -__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a, - __tsan_atomic16 v, __tsan_memory_order mo); -__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a, - __tsan_atomic32 v, __tsan_memory_order mo); -__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a, - __tsan_atomic64 v, __tsan_memory_order mo); - -int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a, - __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo); -int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a, - __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo); -int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a, - __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo); -int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a, - __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo); - -int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a, - __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo); -int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a, - __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo); -int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a, - __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo); -int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a, - __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo); - -void __tsan_atomic_thread_fence(__tsan_memory_order mo); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // #ifndef TSAN_INTERFACE_ATOMIC_H - -inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 cmp = old_value; - __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, - __tsan_memory_order_relaxed); - return cmp; -} - -inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - return __tsan_atomic32_exchange(ptr, new_value, - __tsan_memory_order_relaxed); -} - -inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - return __tsan_atomic32_exchange(ptr, new_value, - __tsan_memory_order_acquire); -} - 
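
The deleted header maps each of these helpers onto a __tsan_* intrinsic so ThreadSanitizer can observe the intended memory ordering. The same contract can be stated with C++11 <atomic>; a rough equivalent of the Acquire_CompareAndSwap above, under that assumption:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    typedef int32_t Atomic32;

    // Returns the value observed at *ptr, as the V8 helper does; the CAS
    // itself carries acquire ordering, like __tsan_memory_order_acquire.
    inline Atomic32 Acquire_CompareAndSwap(std::atomic<Atomic32>* ptr,
                                           Atomic32 old_value,
                                           Atomic32 new_value) {
      Atomic32 cmp = old_value;
      ptr->compare_exchange_strong(cmp, new_value,
                                   std::memory_order_acquire,
                                   std::memory_order_acquire);
      return cmp;
    }

    int main() {
      std::atomic<Atomic32> cell(7);
      assert(Acquire_CompareAndSwap(&cell, 7, 9) == 7);  // swap succeeded
      assert(cell.load() == 9);
      return 0;
    }
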
-inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - return __tsan_atomic32_exchange(ptr, new_value, - __tsan_memory_order_release); -} - -inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - return increment + __tsan_atomic32_fetch_add(ptr, increment, - __tsan_memory_order_relaxed); -} - -inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - return increment + __tsan_atomic32_fetch_add(ptr, increment, - __tsan_memory_order_acq_rel); -} - -inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 cmp = old_value; - __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, - __tsan_memory_order_acquire); - return cmp; -} - -inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - Atomic32 cmp = old_value; - __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, - __tsan_memory_order_release); - return cmp; -} - -inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { - __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed); -} - -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { - __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed); - __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); -} - -inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { - __tsan_atomic32_store(ptr, value, __tsan_memory_order_release); -} - -inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { - return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed); -} - -inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { - return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire); -} - -inline Atomic32 Release_Load(volatile const Atomic32* ptr) { - __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); - return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed); -} - -inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - Atomic64 cmp = old_value; - __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value, - __tsan_memory_order_relaxed); - return cmp; -} - -inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, - Atomic64 new_value) { - return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed); -} - -inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, - Atomic64 new_value) { - return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire); -} - -inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, - Atomic64 new_value) { - return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release); -} - -inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, - Atomic64 increment) { - return increment + __tsan_atomic64_fetch_add(ptr, increment, - __tsan_memory_order_relaxed); -} - -inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, - Atomic64 increment) { - return increment + __tsan_atomic64_fetch_add(ptr, increment, - __tsan_memory_order_acq_rel); -} - -inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { - __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed); -} - -inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { - __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed); - __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); -} - -inline void Release_Store(volatile Atomic64* ptr, 
Atomic64 value) { - __tsan_atomic64_store(ptr, value, __tsan_memory_order_release); -} - -inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { - return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed); -} - -inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { - return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire); -} - -inline Atomic64 Release_Load(volatile const Atomic64* ptr) { - __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); - return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed); -} - -inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - Atomic64 cmp = old_value; - __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value, - __tsan_memory_order_acquire); - return cmp; -} - -inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - Atomic64 cmp = old_value; - __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value, - __tsan_memory_order_release); - return cmp; -} - -inline void MemoryBarrier() { - __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); -} - -} // namespace internal -} // namespace v8 - -#undef ATOMICOPS_COMPILER_BARRIER - -#endif // V8_ATOMICOPS_INTERNALS_TSAN_H_ diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index 8d52950..a368eef 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -1084,11 +1084,11 @@ bool Genesis::InitializeGlobal(Handle inner_global, LookupResult lookup(isolate); result->LocalLookup(heap->callee_symbol(), &lookup); ASSERT(lookup.IsField()); - ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsCalleeIndex); + ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex); result->LocalLookup(heap->length_symbol(), &lookup); ASSERT(lookup.IsField()); - ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex); + ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex); ASSERT(result->map()->inobject_properties() > Heap::kArgumentsCalleeIndex); ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex); @@ -1186,7 +1186,7 @@ bool Genesis::InitializeGlobal(Handle inner_global, LookupResult lookup(isolate); result->LocalLookup(heap->length_symbol(), &lookup); ASSERT(lookup.IsField()); - ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex); + ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex); ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex); @@ -1240,9 +1240,8 @@ bool Genesis::InitializeGlobal(Handle inner_global, // Initialize the out of memory slot. native_context()->set_out_of_memory(heap->false_value()); - // Initialize the embedder data slot. - Handle embedder_data = factory->NewFixedArray(2); - native_context()->set_embedder_data(*embedder_data); + // Initialize the data slot. + native_context()->set_data(heap->undefined_value()); { // Initialize the random seed slot. @@ -1341,7 +1340,7 @@ bool Genesis::CompileScriptCached(Vector name, // If we can't find the function in the cache, we compile a new // function and insert it into the cache. 
if (cache == NULL || !cache->Lookup(name, &function_info)) { - ASSERT(source->IsOneByteRepresentation()); + ASSERT(source->IsAsciiRepresentation()); Handle script_name = factory->NewStringFromUtf8(name); function_info = Compiler::Compile( source, @@ -1416,11 +1415,6 @@ void Genesis::InstallExperimentalNativeFunctions() { INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap); INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate); } - if (FLAG_harmony_observation) { - INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change); - INSTALL_NATIVE(JSFunction, "DeliverChangeRecords", - observers_deliver_changes); - } } #undef INSTALL_NATIVE @@ -1834,11 +1828,6 @@ bool Genesis::InstallExperimentalNatives() { "native collection.js") == 0) { if (!CompileExperimentalBuiltin(isolate(), i)) return false; } - if (FLAG_harmony_observation && - strcmp(ExperimentalNatives::GetScriptName(i).start(), - "native object-observe.js") == 0) { - if (!CompileExperimentalBuiltin(isolate(), i)) return false; - } } InstallExperimentalNativeFunctions(); diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h index d61c031..179e65c 100644 --- a/deps/v8/src/bootstrapper.h +++ b/deps/v8/src/bootstrapper.h @@ -54,7 +54,7 @@ class SourceCodeCache BASE_EMBEDDED { bool Lookup(Vector name, Handle* handle) { for (int i = 0; i < cache_->length(); i+=2) { - SeqOneByteString* str = SeqOneByteString::cast(cache_->get(i)); + SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i)); if (str->IsEqualTo(name)) { *handle = Handle( SharedFunctionInfo::cast(cache_->get(i + 1))); diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index d62713d..df70cd4 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -268,7 +268,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args, maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements); } FixedArrayBase* elms; - if (!maybe_elms->To(&elms)) return maybe_elms; + if (!maybe_elms->To(&elms)) return maybe_elms; // Fill in the content switch (array->GetElementsKind()) { @@ -325,18 +325,6 @@ BUILTIN(ArrayCodeGeneric) { } -static void MoveDoubleElements(FixedDoubleArray* dst, - int dst_index, - FixedDoubleArray* src, - int src_index, - int len) { - if (len == 0) return; - memmove(dst->data_start() + dst_index, - src->data_start() + src_index, - len * kDoubleSize); -} - - static void MoveElements(Heap* heap, AssertNoAllocation* no_gc, FixedArray* dst, @@ -363,39 +351,24 @@ static void FillWithHoles(Heap* heap, FixedArray* dst, int from, int to) { } -static void FillWithHoles(FixedDoubleArray* dst, int from, int to) { - for (int i = from; i < to; i++) { - dst->set_the_hole(i); - } -} - - -static FixedArrayBase* LeftTrimFixedArray(Heap* heap, - FixedArrayBase* elms, - int to_trim) { - Map* map = elms->map(); - int entry_size; - if (elms->IsFixedArray()) { - entry_size = kPointerSize; - } else { - entry_size = kDoubleSize; - } +static FixedArray* LeftTrimFixedArray(Heap* heap, + FixedArray* elms, + int to_trim) { ASSERT(elms->map() != HEAP->fixed_cow_array_map()); // For now this trick is only applied to fixed arrays in new and paged space. // In large object space the object's start must coincide with chunk // and thus the trick is just not applicable. 
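
SourceCodeCache::Lookup above scans a flat FixedArray of name/value pairs, and CompileScriptCached drives the usual memoizing protocol around it: look up by name, compile on a miss, insert the result. A stripped-down sketch of that flow under those assumptions (std::map in place of the FixedArray backing, hypothetical stand-in types):

    #include <functional>
    #include <map>
    #include <string>

    struct SharedFunctionInfo { std::string source; };  // stand-in

    class SourceCodeCache {
     public:
      // Returns the cached entry for |name|, compiling and inserting on a miss.
      SharedFunctionInfo* Get(const std::string& name,
                              const std::function<SharedFunctionInfo()>& compile) {
        auto it = cache_.find(name);
        if (it == cache_.end()) {
          it = cache_.emplace(name, compile()).first;  // compile once, then reuse
        }
        return &it->second;
      }
     private:
      std::map<std::string, SharedFunctionInfo> cache_;
    };

    int main() {
      SourceCodeCache cache;
      auto compile = [] { return SharedFunctionInfo{"native array.js"}; };
      SharedFunctionInfo* a = cache.Get("array", compile);
      SharedFunctionInfo* b = cache.Get("array", compile);
      return a == b ? 0 : 1;  // second lookup hits the cache
    }
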
ASSERT(!HEAP->lo_space()->Contains(elms)); - STATIC_ASSERT(FixedArrayBase::kMapOffset == 0); - STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize); - STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize); + STATIC_ASSERT(FixedArray::kMapOffset == 0); + STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize); + STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize); Object** former_start = HeapObject::RawField(elms, 0); const int len = elms->length(); - if (to_trim * entry_size > FixedArrayBase::kHeaderSize && - elms->IsFixedArray() && + if (to_trim > FixedArray::kHeaderSize / kPointerSize && !heap->new_space()->Contains(elms)) { // If we are doing a big trim in old space then we zap the space that was // formerly part of the array so that the GC (aided by the card-based @@ -409,15 +382,14 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap, // Technically in new space this write might be omitted (except for // debug mode which iterates through the heap), but to play safer // we still do it. - heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size); + heap->CreateFillerObjectAt(elms->address(), to_trim * kPointerSize); - int new_start_index = to_trim * (entry_size / kPointerSize); - former_start[new_start_index] = map; - former_start[new_start_index + 1] = Smi::FromInt(len - to_trim); + former_start[to_trim] = heap->fixed_array_map(); + former_start[to_trim + 1] = Smi::FromInt(len - to_trim); // Maintain marking consistency for HeapObjectIterator and // IncrementalMarking. - int size_delta = to_trim * entry_size; + int size_delta = to_trim * kPointerSize; if (heap->marking()->TransferMark(elms->address(), elms->address() + size_delta)) { MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta); @@ -425,8 +397,8 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap, HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(), elms->address() + size_delta)); - return FixedArrayBase::cast(HeapObject::FromAddress( - elms->address() + to_trim * entry_size)); + return FixedArray::cast(HeapObject::FromAddress( + elms->address() + to_trim * kPointerSize)); } @@ -455,14 +427,19 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements( Map* map = elms->map(); if (map == heap->fixed_array_map()) { if (args == NULL || array->HasFastObjectElements()) return elms; + if (array->HasFastDoubleElements()) { + ASSERT(elms == heap->empty_fixed_array()); + MaybeObject* maybe_transition = + array->TransitionElementsKind(FAST_ELEMENTS); + if (maybe_transition->IsFailure()) return maybe_transition; + return elms; + } } else if (map == heap->fixed_cow_array_map()) { MaybeObject* maybe_writable_result = array->EnsureWritableFastElements(); if (args == NULL || array->HasFastObjectElements() || - !maybe_writable_result->To(&elms)) { + maybe_writable_result->IsFailure()) { return maybe_writable_result; } - } else if (map == heap->fixed_double_array_map()) { - if (args == NULL) return elms; } else { return NULL; } @@ -472,28 +449,13 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements( int args_length = args->length(); if (first_added_arg >= args_length) return array->elements(); - ElementsKind origin_kind = array->map()->elements_kind(); - ASSERT(!IsFastObjectElementsKind(origin_kind)); - ElementsKind target_kind = origin_kind; - int arg_count = args->length() - first_added_arg; - Object** arguments = args->arguments() - first_added_arg - (arg_count - 1); - for (int i = 0; i < arg_count; i++) { - Object* arg = arguments[i]; - if 
(arg->IsHeapObject()) { - if (arg->IsHeapNumber()) { - target_kind = FAST_DOUBLE_ELEMENTS; - } else { - target_kind = FAST_ELEMENTS; - break; - } - } - } - if (target_kind != origin_kind) { - MaybeObject* maybe_failure = array->TransitionElementsKind(target_kind); - if (maybe_failure->IsFailure()) return maybe_failure; - return array->elements(); - } - return elms; + MaybeObject* maybe_array = array->EnsureCanContainElements( + args, + first_added_arg, + args_length - first_added_arg, + DONT_ALLOW_DOUBLE_ELEMENTS); + if (maybe_array->IsFailure()) return maybe_array; + return array->elements(); } @@ -537,200 +499,127 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin( BUILTIN(ArrayPush) { Heap* heap = isolate->heap(); Object* receiver = *args.receiver(); - FixedArrayBase* elms_obj; - MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1); - if (maybe_elms_obj == NULL) { - return CallJsBuiltin(isolate, "ArrayPush", args); - } - if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj; - - if (FLAG_harmony_observation && - JSObject::cast(receiver)->map()->is_observed()) { - return CallJsBuiltin(isolate, "ArrayPush", args); + Object* elms_obj; + { MaybeObject* maybe_elms_obj = + EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1); + if (maybe_elms_obj == NULL) { + return CallJsBuiltin(isolate, "ArrayPush", args); + } + if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; } - + FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ElementsKind kind = array->GetElementsKind(); - if (IsFastSmiOrObjectElementsKind(kind)) { - FixedArray* elms = FixedArray::cast(elms_obj); - - int len = Smi::cast(array->length())->value(); - int to_add = args.length() - 1; - if (to_add == 0) { - return Smi::FromInt(len); - } - // Currently fixed arrays cannot grow too big, so - // we should never hit this case. - ASSERT(to_add <= (Smi::kMaxValue - len)); - - int new_length = len + to_add; - - if (new_length > elms->length()) { - // New backing storage is needed. - int capacity = new_length + (new_length >> 1) + 16; - FixedArray* new_elms; - MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity); - if (!maybe_obj->To(&new_elms)) return maybe_obj; - - ElementsAccessor* accessor = array->GetElementsAccessor(); - MaybeObject* maybe_failure = accessor->CopyElements( - NULL, 0, new_elms, kind, 0, - ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj); - ASSERT(!maybe_failure->IsFailure()); - USE(maybe_failure); - - elms = new_elms; - } - - // Add the provided values. - AssertNoAllocation no_gc; - WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); - for (int index = 0; index < to_add; index++) { - elms->set(index + len, args[index + 1], mode); - } - - if (elms != array->elements()) { - array->set_elements(elms); - } + int len = Smi::cast(array->length())->value(); + int to_add = args.length() - 1; + if (to_add == 0) { + return Smi::FromInt(len); + } + // Currently fixed arrays cannot grow too big, so + // we should never hit this case. + ASSERT(to_add <= (Smi::kMaxValue - len)); - // Set the length. - array->set_length(Smi::FromInt(new_length)); - return Smi::FromInt(new_length); - } else { - int len = Smi::cast(array->length())->value(); - int elms_len = elms_obj->length(); + int new_length = len + to_add; - int to_add = args.length() - 1; - if (to_add == 0) { - return Smi::FromInt(len); + if (new_length > elms->length()) { + // New backing storage is needed. 
+ int capacity = new_length + (new_length >> 1) + 16; + Object* obj; + { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity); + if (!maybe_obj->ToObject(&obj)) return maybe_obj; } - // Currently fixed arrays cannot grow too big, so - // we should never hit this case. - ASSERT(to_add <= (Smi::kMaxValue - len)); + FixedArray* new_elms = FixedArray::cast(obj); - int new_length = len + to_add; - - FixedDoubleArray* new_elms; - - if (new_length > elms_len) { - // New backing storage is needed. - int capacity = new_length + (new_length >> 1) + 16; - MaybeObject* maybe_obj = - heap->AllocateUninitializedFixedDoubleArray(capacity); - if (!maybe_obj->To(&new_elms)) return maybe_obj; - - ElementsAccessor* accessor = array->GetElementsAccessor(); - MaybeObject* maybe_failure = accessor->CopyElements( - NULL, 0, new_elms, kind, 0, - ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj); - ASSERT(!maybe_failure->IsFailure()); - USE(maybe_failure); - } else { - // to_add is > 0 and new_length <= elms_len, so elms_obj cannot be the - // empty_fixed_array. - new_elms = FixedDoubleArray::cast(elms_obj); - } + ElementsKind kind = array->GetElementsKind(); + CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, 0, len); + FillWithHoles(heap, new_elms, new_length, capacity); - // Add the provided values. - AssertNoAllocation no_gc; - int index; - for (index = 0; index < to_add; index++) { - Object* arg = args[index + 1]; - new_elms->set(index + len, arg->Number()); - } + elms = new_elms; + } - if (new_elms != array->elements()) { - array->set_elements(new_elms); - } + // Add the provided values. + AssertNoAllocation no_gc; + WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); + for (int index = 0; index < to_add; index++) { + elms->set(index + len, args[index + 1], mode); + } - // Set the length. - array->set_length(Smi::FromInt(new_length)); - return Smi::FromInt(new_length); + if (elms != array->elements()) { + array->set_elements(elms); } + + // Set the length. + array->set_length(Smi::FromInt(new_length)); + return Smi::FromInt(new_length); } BUILTIN(ArrayPop) { Heap* heap = isolate->heap(); Object* receiver = *args.receiver(); - FixedArrayBase* elms_obj; - MaybeObject* maybe_elms = - EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); - if (maybe_elms == NULL) return CallJsBuiltin(isolate, "ArrayPop", args); - if (!maybe_elms->To(&elms_obj)) return maybe_elms; - - JSArray* array = JSArray::cast(receiver); - - if (FLAG_harmony_observation && array->map()->is_observed()) { - return CallJsBuiltin(isolate, "ArrayPop", args); + Object* elms_obj; + { MaybeObject* maybe_elms_obj = + EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); + if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args); + if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; } + FixedArray* elms = FixedArray::cast(elms_obj); + JSArray* array = JSArray::cast(receiver); int len = Smi::cast(array->length())->value(); if (len == 0) return heap->undefined_value(); - ElementsAccessor* accessor = array->GetElementsAccessor(); - int new_length = len - 1; - MaybeObject* maybe_result; - if (accessor->HasElement(array, array, new_length, elms_obj)) { - maybe_result = accessor->Get(array, array, new_length, elms_obj); - } else { - maybe_result = array->GetPrototype()->GetElement(len - 1); + // Get top element + MaybeObject* top = elms->get(len - 1); + + // Set the length. 
+ array->set_length(Smi::FromInt(len - 1)); + + if (!top->IsTheHole()) { + // Delete the top element. + elms->set_the_hole(len - 1); + return top; } - if (maybe_result->IsFailure()) return maybe_result; - MaybeObject* maybe_failure = - accessor->SetLength(array, Smi::FromInt(new_length)); - if (maybe_failure->IsFailure()) return maybe_failure; - return maybe_result; + + top = array->GetPrototype()->GetElement(len - 1); + + return top; } BUILTIN(ArrayShift) { Heap* heap = isolate->heap(); Object* receiver = *args.receiver(); - FixedArrayBase* elms_obj; - MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); - if (maybe_elms_obj == NULL) - return CallJsBuiltin(isolate, "ArrayShift", args); - if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj; - + Object* elms_obj; + { MaybeObject* maybe_elms_obj = + EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); + if (maybe_elms_obj == NULL) + return CallJsBuiltin(isolate, "ArrayShift", args); + if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; + } if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) { return CallJsBuiltin(isolate, "ArrayShift", args); } + FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - - if (FLAG_harmony_observation && array->map()->is_observed()) { - return CallJsBuiltin(isolate, "ArrayShift", args); - } + ASSERT(array->HasFastSmiOrObjectElements()); int len = Smi::cast(array->length())->value(); if (len == 0) return heap->undefined_value(); // Get first element - ElementsAccessor* accessor = array->GetElementsAccessor(); - Object* first; - MaybeObject* maybe_first = accessor->Get(receiver, array, 0, elms_obj); - if (!maybe_first->To(&first)) return maybe_first; + Object* first = elms->get(0); if (first->IsTheHole()) { first = heap->undefined_value(); } - if (!heap->lo_space()->Contains(elms_obj)) { - array->set_elements(LeftTrimFixedArray(heap, elms_obj, 1)); + if (!heap->lo_space()->Contains(elms)) { + array->set_elements(LeftTrimFixedArray(heap, elms, 1)); } else { // Shift the elements. - if (elms_obj->IsFixedArray()) { - FixedArray* elms = FixedArray::cast(elms_obj); - AssertNoAllocation no_gc; - MoveElements(heap, &no_gc, elms, 0, elms, 1, len - 1); - elms->set(len - 1, heap->the_hole_value()); - } else { - FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj); - MoveDoubleElements(elms, 0, elms, 1, len - 1); - elms->set_the_hole(len - 1); - } + AssertNoAllocation no_gc; + MoveElements(heap, &no_gc, elms, 0, elms, 1, len - 1); + elms->set(len - 1, heap->the_hole_value()); } // Set the length. 
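
ArrayShift above prefers LeftTrimFixedArray (builtins.cc hunk earlier) over an O(n) move when it can: the map and length words are rewritten to_trim slots further into the backing store and the object pointer is advanced, leaving the old prefix behind as filler. A plain-C++ sketch of just the pointer arithmetic, assuming the two-word header the STATIC_ASSERTs demand:

    #include <cassert>
    #include <cstdint>

    // Mimics a FixedArray layout: [map][length][elem0][elem1]...
    // The header is two words, as checked by the STATIC_ASSERTs in builtins.cc.
    static intptr_t* LeftTrim(intptr_t* elms, int to_trim) {
      intptr_t map = elms[0];
      intptr_t len = elms[1];
      // Re-write the header to_trim slots further along; in the real heap the
      // vacated prefix is turned into a GC filler object.
      elms[to_trim] = map;
      elms[to_trim + 1] = len - to_trim;
      return elms + to_trim;  // the object now "starts" here
    }

    int main() {
      intptr_t backing[6] = {/*map*/ 42, /*length*/ 4, 10, 20, 30, 40};
      intptr_t* trimmed = LeftTrim(backing, 1);
      assert(trimmed[0] == 42);  // map carried over
      assert(trimmed[1] == 3);   // length shrunk by one
      assert(trimmed[2] == 20);  // old element 1 is the new element 0
      return 0;
    }
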
@@ -743,25 +632,19 @@ BUILTIN(ArrayShift) { BUILTIN(ArrayUnshift) { Heap* heap = isolate->heap(); Object* receiver = *args.receiver(); - FixedArrayBase* elms_obj; - MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); - if (maybe_elms_obj == NULL) - return CallJsBuiltin(isolate, "ArrayUnshift", args); - if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj; - - if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) { - return CallJsBuiltin(isolate, "ArrayUnshift", args); + Object* elms_obj; + { MaybeObject* maybe_elms_obj = + EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); + if (maybe_elms_obj == NULL) + return CallJsBuiltin(isolate, "ArrayUnshift", args); + if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; } - JSArray* array = JSArray::cast(receiver); - if (!array->HasFastSmiOrObjectElements()) { + if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) { return CallJsBuiltin(isolate, "ArrayUnshift", args); } FixedArray* elms = FixedArray::cast(elms_obj); - - if (FLAG_harmony_observation && array->map()->is_observed()) { - return CallJsBuiltin(isolate, "ArrayUnshift", args); - } + JSArray* array = JSArray::cast(receiver); + ASSERT(array->HasFastSmiOrObjectElements()); int len = Smi::cast(array->length())->value(); int to_add = args.length() - 1; @@ -778,18 +661,14 @@ BUILTIN(ArrayUnshift) { if (new_length > elms->length()) { // New backing storage is needed. int capacity = new_length + (new_length >> 1) + 16; - FixedArray* new_elms; - MaybeObject* maybe_elms = heap->AllocateUninitializedFixedArray(capacity); - if (!maybe_elms->To(&new_elms)) return maybe_elms; - + Object* obj; + { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity); + if (!maybe_obj->ToObject(&obj)) return maybe_obj; + } + FixedArray* new_elms = FixedArray::cast(obj); ElementsKind kind = array->GetElementsKind(); - ElementsAccessor* accessor = array->GetElementsAccessor(); - MaybeObject* maybe_failure = accessor->CopyElements( - NULL, 0, new_elms, kind, to_add, - ElementsAccessor::kCopyToEndAndInitializeToHole, elms); - ASSERT(!maybe_failure->IsFailure()); - USE(maybe_failure); - + CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, to_add, len); + FillWithHoles(heap, new_elms, new_length, capacity); elms = new_elms; array->set_elements(elms); } else { @@ -813,20 +692,16 @@ BUILTIN(ArrayUnshift) { BUILTIN(ArraySlice) { Heap* heap = isolate->heap(); Object* receiver = *args.receiver(); - FixedArrayBase* elms; + FixedArray* elms; int len = -1; if (receiver->IsJSArray()) { JSArray* array = JSArray::cast(receiver); - if (!IsJSArrayFastElementMovingAllowed(heap, array)) { - return CallJsBuiltin(isolate, "ArraySlice", args); - } - - if (array->HasFastElements()) { - elms = array->elements(); - } else { + if (!array->HasFastSmiOrObjectElements() || + !IsJSArrayFastElementMovingAllowed(heap, array)) { return CallJsBuiltin(isolate, "ArraySlice", args); } + elms = FixedArray::cast(array->elements()); len = Smi::cast(array->length())->value(); } else { // Array.slice(arguments, ...) 
is quite a common idiom (notably more @@ -835,19 +710,15 @@ BUILTIN(ArraySlice) { isolate->context()->native_context()->arguments_boilerplate()->map(); bool is_arguments_object_with_fast_elements = - receiver->IsJSObject() && - JSObject::cast(receiver)->map() == arguments_map; + receiver->IsJSObject() + && JSObject::cast(receiver)->map() == arguments_map + && JSObject::cast(receiver)->HasFastSmiOrObjectElements(); if (!is_arguments_object_with_fast_elements) { return CallJsBuiltin(isolate, "ArraySlice", args); } - JSObject* object = JSObject::cast(receiver); - - if (object->HasFastElements()) { - elms = object->elements(); - } else { - return CallJsBuiltin(isolate, "ArraySlice", args); - } - Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex); + elms = FixedArray::cast(JSObject::cast(receiver)->elements()); + Object* len_obj = JSObject::cast(receiver) + ->InObjectPropertyAt(Heap::kArgumentsLengthIndex); if (!len_obj->IsSmi()) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -855,27 +726,12 @@ BUILTIN(ArraySlice) { if (len > elms->length()) { return CallJsBuiltin(isolate, "ArraySlice", args); } - } - - JSObject* object = JSObject::cast(receiver); - ElementsKind kind = object->GetElementsKind(); - - if (IsHoleyElementsKind(kind)) { - bool packed = true; - ElementsAccessor* accessor = ElementsAccessor::ForKind(kind); for (int i = 0; i < len; i++) { - if (!accessor->HasElement(object, object, i, elms)) { - packed = false; - break; + if (elms->get(i) == heap->the_hole_value()) { + return CallJsBuiltin(isolate, "ArraySlice", args); } } - if (packed) { - kind = GetPackedElementsKind(kind); - } else if (!receiver->IsJSArray()) { - return CallJsBuiltin(isolate, "ArraySlice", args); - } } - ASSERT(len >= 0); int n_arguments = args.length() - 1; @@ -888,12 +744,6 @@ BUILTIN(ArraySlice) { Object* arg1 = args[1]; if (arg1->IsSmi()) { relative_start = Smi::cast(arg1)->value(); - } else if (arg1->IsHeapNumber()) { - double start = HeapNumber::cast(arg1)->value(); - if (start < kMinInt || start > kMaxInt) { - return CallJsBuiltin(isolate, "ArraySlice", args); - } - relative_start = static_cast(start); } else if (!arg1->IsUndefined()) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -901,12 +751,6 @@ BUILTIN(ArraySlice) { Object* arg2 = args[2]; if (arg2->IsSmi()) { relative_end = Smi::cast(arg2)->value(); - } else if (arg2->IsHeapNumber()) { - double end = HeapNumber::cast(arg2)->value(); - if (end < kMinInt || end > kMaxInt) { - return CallJsBuiltin(isolate, "ArraySlice", args); - } - relative_end = static_cast(end); } else if (!arg2->IsUndefined()) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -921,24 +765,21 @@ BUILTIN(ArraySlice) { int final = (relative_end < 0) ? Max(len + relative_end, 0) : Min(relative_end, len); + ElementsKind elements_kind = JSObject::cast(receiver)->GetElementsKind(); + // Calculate the length of result array. 
int result_len = Max(final - k, 0); + MaybeObject* maybe_array = + heap->AllocateJSArrayAndStorage(elements_kind, + result_len, + result_len); JSArray* result_array; - MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(kind, - result_len, - result_len); - - AssertNoAllocation no_gc; - if (result_len == 0) return maybe_array; if (!maybe_array->To(&result_array)) return maybe_array; - ElementsAccessor* accessor = object->GetElementsAccessor(); - MaybeObject* maybe_failure = - accessor->CopyElements(NULL, k, result_array->elements(), - kind, 0, result_len, elms); - ASSERT(!maybe_failure->IsFailure()); - USE(maybe_failure); + CopyObjectToObjectElements(elms, elements_kind, k, + FixedArray::cast(result_array->elements()), + elements_kind, 0, result_len); return result_array; } @@ -947,22 +788,19 @@ BUILTIN(ArraySlice) { BUILTIN(ArraySplice) { Heap* heap = isolate->heap(); Object* receiver = *args.receiver(); - FixedArrayBase* elms_obj; - MaybeObject* maybe_elms = - EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3); - if (maybe_elms == NULL) { - return CallJsBuiltin(isolate, "ArraySplice", args); + Object* elms_obj; + { MaybeObject* maybe_elms_obj = + EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3); + if (maybe_elms_obj == NULL) + return CallJsBuiltin(isolate, "ArraySplice", args); + if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; } - if (!maybe_elms->To(&elms_obj)) return maybe_elms; - if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) { return CallJsBuiltin(isolate, "ArraySplice", args); } + FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - - if (FLAG_harmony_observation && array->map()->is_observed()) { - return CallJsBuiltin(isolate, "ArraySplice", args); - } + ASSERT(array->HasFastSmiOrObjectElements()); int len = Smi::cast(array->length())->value(); @@ -973,12 +811,6 @@ BUILTIN(ArraySplice) { Object* arg1 = args[1]; if (arg1->IsSmi()) { relative_start = Smi::cast(arg1)->value(); - } else if (arg1->IsHeapNumber()) { - double start = HeapNumber::cast(arg1)->value(); - if (start < kMinInt || start > kMaxInt) { - return CallJsBuiltin(isolate, "ArraySplice", args); - } - relative_start = static_cast(start); } else if (!arg1->IsUndefined()) { return CallJsBuiltin(isolate, "ArraySplice", args); } @@ -1008,84 +840,51 @@ BUILTIN(ArraySplice) { actual_delete_count = Min(Max(value, 0), len - actual_start); } - ElementsKind elements_kind = array->GetElementsKind(); - - int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0; - int new_length = len - actual_delete_count + item_count; - - // For double mode we do not support changing the length. 
-  // For double mode we do not support changing the length.
-  if (new_length > len && IsFastDoubleElementsKind(elements_kind)) {
-    return CallJsBuiltin(isolate, "ArraySplice", args);
-  }
-
-  if (new_length == 0) {
-    MaybeObject* maybe_array = heap->AllocateJSArrayWithElements(
-        elms_obj, elements_kind, actual_delete_count);
-    if (maybe_array->IsFailure()) return maybe_array;
-    array->set_elements(heap->empty_fixed_array());
-    array->set_length(Smi::FromInt(0));
-    return maybe_array;
-  }
-
   JSArray* result_array = NULL;
+  ElementsKind elements_kind =
+      JSObject::cast(receiver)->GetElementsKind();
   MaybeObject* maybe_array =
       heap->AllocateJSArrayAndStorage(elements_kind,
                                       actual_delete_count,
                                       actual_delete_count);
   if (!maybe_array->To(&result_array)) return maybe_array;

-  if (actual_delete_count > 0) {
-    AssertNoAllocation no_gc;
-    ElementsAccessor* accessor = array->GetElementsAccessor();
-    MaybeObject* maybe_failure =
-        accessor->CopyElements(NULL, actual_start, result_array->elements(),
-                               elements_kind, 0, actual_delete_count, elms_obj);
-    // Cannot fail since the origin and target array are of the same elements
-    // kind.
-    ASSERT(!maybe_failure->IsFailure());
-    USE(maybe_failure);
+  {
+    // Fill newly created array.
+    CopyObjectToObjectElements(elms, elements_kind, actual_start,
+                               FixedArray::cast(result_array->elements()),
+                               elements_kind, 0, actual_delete_count);
   }

+  int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
+  int new_length = len - actual_delete_count + item_count;
+
   bool elms_changed = false;
   if (item_count < actual_delete_count) {
     // Shrink the array.
-    const bool trim_array = !heap->lo_space()->Contains(elms_obj) &&
+    const bool trim_array = !heap->lo_space()->Contains(elms) &&
       ((actual_start + item_count) <
           (len - actual_delete_count - actual_start));
     if (trim_array) {
       const int delta = actual_delete_count - item_count;

-      if (elms_obj->IsFixedDoubleArray()) {
-        FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
-        MoveDoubleElements(elms, delta, elms, 0, actual_start);
-      } else {
-        FixedArray* elms = FixedArray::cast(elms_obj);
+      { AssertNoAllocation no_gc;
         MoveElements(heap, &no_gc, elms, delta, elms, 0, actual_start);
       }

-      elms_obj = LeftTrimFixedArray(heap, elms_obj, delta);
+      elms = LeftTrimFixedArray(heap, elms, delta);
       elms_changed = true;
     } else {
-      if (elms_obj->IsFixedDoubleArray()) {
-        FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
-        MoveDoubleElements(elms, actual_start + item_count,
-                           elms, actual_start + actual_delete_count,
-                           (len - actual_delete_count - actual_start));
-        FillWithHoles(elms, new_length, len);
-      } else {
-        FixedArray* elms = FixedArray::cast(elms_obj);
-        AssertNoAllocation no_gc;
-        MoveElements(heap, &no_gc,
-                     elms, actual_start + item_count,
-                     elms, actual_start + actual_delete_count,
-                     (len - actual_delete_count - actual_start));
-        FillWithHoles(heap, elms, new_length, len);
-      }
+      AssertNoAllocation no_gc;
+      MoveElements(heap, &no_gc,
+                   elms, actual_start + item_count,
+                   elms, actual_start + actual_delete_count,
+                   (len - actual_delete_count - actual_start));
+      FillWithHoles(heap, elms, new_length, len);
     }
   } else if (item_count > actual_delete_count) {
-    FixedArray* elms = FixedArray::cast(elms_obj);
     // Currently fixed arrays cannot grow too big, so
     // we should never hit this case.
     ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
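
The shrink path above chooses between left-trimming the backing store and
shifting the tail, based on which side of the deleted region is smaller. A
rough standalone sketch of that choice, using plain ints in place of tagged
values (illustrative only; the real code also refuses to trim arrays in
large-object space):

    #include <cstdio>
    #include <cstring>

    // Splice-shrink: remove [start, start+del) keeping `items` slots for
    // replacements, moving whichever side of the hole is smaller.
    static void SpliceShrink(int* data, int len, int start, int del,
                             int items) {
      bool move_front = (start + items) < (len - del - start);
      if (move_front) {
        // Shift the prefix right by the shrink delta; the store can then
        // be "left-trimmed" by that many slots, as LeftTrimFixedArray does.
        std::memmove(data + (del - items), data, start * sizeof(int));
      } else {
        // Shift the tail left over the deleted section.
        std::memmove(data + start + items, data + start + del,
                     (len - del - start) * sizeof(int));
      }
    }

    int main() {
      int a[6] = {0, 1, 2, 3, 4, 5};
      SpliceShrink(a, 6, 2, 2, 0);  // like [0..5].splice(2, 2)
      std::printf("%d %d %d %d\n", a[0], a[1], a[2], a[3]);  // 0 1 4 5
      return 0;
    }
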
@@ -1094,29 +893,28 @@ BUILTIN(ArraySplice) {
     if (new_length > elms->length()) {
       // New backing storage is needed.
       int capacity = new_length + (new_length >> 1) + 16;
-      FixedArray* new_elms;
-      MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
-      if (!maybe_obj->To(&new_elms)) return maybe_obj;
-
-      AssertNoAllocation no_gc;
+      Object* obj;
+      { MaybeObject* maybe_obj =
+            heap->AllocateUninitializedFixedArray(capacity);
+        if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+      }
+      FixedArray* new_elms = FixedArray::cast(obj);

-      ElementsKind kind = array->GetElementsKind();
-      ElementsAccessor* accessor = array->GetElementsAccessor();
-      if (actual_start > 0) {
+      {
         // Copy the part before actual_start as is.
-        MaybeObject* maybe_failure = accessor->CopyElements(
-            NULL, 0, new_elms, kind, 0, actual_start, elms);
-        ASSERT(!maybe_failure->IsFailure());
-        USE(maybe_failure);
+        ElementsKind kind = array->GetElementsKind();
+        CopyObjectToObjectElements(elms, kind, 0,
+                                   new_elms, kind, 0, actual_start);
+        const int to_copy = len - actual_delete_count - actual_start;
+        CopyObjectToObjectElements(elms, kind,
+                                   actual_start + actual_delete_count,
+                                   new_elms, kind,
+                                   actual_start + item_count, to_copy);
       }
-      MaybeObject* maybe_failure = accessor->CopyElements(
-          NULL, actual_start + actual_delete_count, new_elms, kind,
-          actual_start + item_count,
-          ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
-      ASSERT(!maybe_failure->IsFailure());
-      USE(maybe_failure);
-
-      elms_obj = new_elms;
+
+      FillWithHoles(heap, new_elms, new_length, capacity);
+
+      elms = new_elms;
       elms_changed = true;
     } else {
       AssertNoAllocation no_gc;
@@ -1127,28 +925,16 @@ BUILTIN(ArraySplice) {
     }
   }

-  if (IsFastDoubleElementsKind(elements_kind)) {
-    FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
-    for (int k = actual_start; k < actual_start + item_count; k++) {
-      Object* arg = args[3 + k - actual_start];
-      if (arg->IsSmi()) {
-        elms->set(k, Smi::cast(arg)->value());
-      } else {
-        elms->set(k, HeapNumber::cast(arg)->value());
-      }
-    }
-  } else {
-    FixedArray* elms = FixedArray::cast(elms_obj);
-    AssertNoAllocation no_gc;
-    WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
-    for (int k = actual_start; k < actual_start + item_count; k++) {
-      elms->set(k, args[3 + k - actual_start], mode);
-    }
+  AssertNoAllocation no_gc;
+  WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+  for (int k = actual_start; k < actual_start + item_count; k++) {
+    elms->set(k, args[3 + k - actual_start], mode);
   }

   if (elms_changed) {
-    array->set_elements(elms_obj);
+    array->set_elements(elms);
   }
+
   // Set the length.
   array->set_length(Smi::FromInt(new_length));

@@ -1170,15 +956,14 @@ BUILTIN(ArrayConcat) {
   int n_arguments = args.length();
   int result_len = 0;
   ElementsKind elements_kind = GetInitialFastElementsKind();
-  bool has_double = false;
-  bool is_holey = false;
   for (int i = 0; i < n_arguments; i++) {
     Object* arg = args[i];
     if (!arg->IsJSArray() ||
-        !JSArray::cast(arg)->HasFastElements() ||
+        !JSArray::cast(arg)->HasFastSmiOrObjectElements() ||
        JSArray::cast(arg)->GetPrototype() != array_proto) {
       return CallJsBuiltin(isolate, "ArrayConcat", args);
     }
+
     int len = Smi::cast(JSArray::cast(arg)->length())->value();
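
The capacity formula at the top of this hunk is a plain grow-by-half policy
with a small constant, so tiny arrays don't reallocate on every growing
splice. In isolation:

    #include <cstdio>

    // The growth rule used above: ~1.5x the requested length, plus slack.
    static int NewBackingCapacity(int new_length) {
      return new_length + (new_length >> 1) + 16;
    }

    int main() {
      int lens[] = {0, 10, 100, 1000};
      for (int i = 0; i < 4; i++)
        std::printf("len %4d -> capacity %4d\n",
                    lens[i], NewBackingCapacity(lens[i]));
      return 0;
    }
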
     // We shouldn't overflow when adding another len.
@@ -1188,51 +973,47 @@ BUILTIN(ArrayConcat) {
     result_len += len;
     ASSERT(result_len >= 0);

-    if (result_len > FixedDoubleArray::kMaxLength) {
+    if (result_len > FixedArray::kMaxLength) {
       return CallJsBuiltin(isolate, "ArrayConcat", args);
     }

-    ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind();
-    has_double = has_double || IsFastDoubleElementsKind(arg_kind);
-    is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
-    if (IsMoreGeneralElementsKindTransition(elements_kind, arg_kind)) {
-      elements_kind = arg_kind;
+    if (!JSArray::cast(arg)->HasFastSmiElements()) {
+      if (IsFastSmiElementsKind(elements_kind)) {
+        if (IsFastHoleyElementsKind(elements_kind)) {
+          elements_kind = FAST_HOLEY_ELEMENTS;
+        } else {
+          elements_kind = FAST_ELEMENTS;
+        }
+      }
+    }
+
+    if (JSArray::cast(arg)->HasFastHoleyElements()) {
+      elements_kind = GetHoleyElementsKind(elements_kind);
     }
   }

-  if (is_holey) elements_kind = GetHoleyElementsKind(elements_kind);
-
-  // If a double array is concatted into a fast elements array, the fast
-  // elements array needs to be initialized to contain proper holes, since
-  // boxing doubles may cause incremental marking.
-  ArrayStorageAllocationMode mode =
-      has_double && IsFastObjectElementsKind(elements_kind)
-      ? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE : DONT_INITIALIZE_ARRAY_ELEMENTS;
-  JSArray* result_array;
   // Allocate result.
+  JSArray* result_array;
   MaybeObject* maybe_array =
       heap->AllocateJSArrayAndStorage(elements_kind,
                                       result_len,
-                                      result_len,
-                                      mode);
+                                      result_len);
   if (!maybe_array->To(&result_array)) return maybe_array;
   if (result_len == 0) return result_array;

-  int j = 0;
-  FixedArrayBase* storage = result_array->elements();
+  // Copy data.
+  int start_pos = 0;
+  FixedArray* result_elms(FixedArray::cast(result_array->elements()));
   for (int i = 0; i < n_arguments; i++) {
     JSArray* array = JSArray::cast(args[i]);
     int len = Smi::cast(array->length())->value();
-    if (len > 0) {
-      ElementsAccessor* accessor = array->GetElementsAccessor();
-      MaybeObject* maybe_failure =
-          accessor->CopyElements(array, 0, storage, elements_kind, j, len);
-      if (maybe_failure->IsFailure()) return maybe_failure;
-      j += len;
-    }
+    FixedArray* elms = FixedArray::cast(array->elements());
+    CopyObjectToObjectElements(elms, elements_kind, 0,
+                               result_elms, elements_kind,
+                               start_pos, len);
+    start_pos += len;
   }
-
-  ASSERT(j == result_len);
+  ASSERT(start_pos == result_len);
   return result_array;
 }

@@ -1252,28 +1033,12 @@ BUILTIN(StrictModePoisonPill) {
 //
-// Searches the hidden prototype chain of the given object for the first
-// object that is an instance of the given type.  If no such object can
-// be found then Heap::null_value() is returned.
-static inline Object* FindHidden(Heap* heap,
-                                 Object* object,
-                                 FunctionTemplateInfo* type) {
-  if (object->IsInstanceOf(type)) return object;
-  Object* proto = object->GetPrototype();
-  if (proto->IsJSObject() &&
-      JSObject::cast(proto)->map()->is_hidden_prototype()) {
-    return FindHidden(heap, proto, type);
-  }
-  return heap->null_value();
-}
-
-
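
ArrayConcat's first loop only sums lengths and sanity-checks them before any
allocation happens. A minimal sketch of that accumulation with the overflow
and size-limit bailouts (kMaxLength below is an illustrative stand-in, not
the real FixedArray::kMaxLength value):

    #include <cassert>
    #include <climits>

    static const int kMaxLength = (1 << 26) - 1;  // illustrative value only

    // Accumulate lengths; returns false where the builtin above would
    // fall back to the generic JS path.
    static bool TotalConcatLength(const int* lens, int n, int* out) {
      int total = 0;
      for (int i = 0; i < n; i++) {
        if (lens[i] > INT_MAX - total) return false;  // would overflow
        total += lens[i];
        if (total > kMaxLength) return false;  // too big for one store
      }
      *out = total;
      return true;
    }

    int main() {
      int lens[] = {3, 5, 7};
      int total = 0;
      assert(TotalConcatLength(lens, 3, &total) && total == 15);
      return 0;
    }
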
 // Returns the holder JSObject if the function can legally be called
 // with this receiver.  Returns Heap::null_value() if the call is
 // illegal.  Any arguments that don't fit the expected type is
-// overwritten with undefined.  Note that holder and the arguments are
-// implicitly rewritten with the first object in the hidden prototype
-// chain that actually has the expected type.
+// overwritten with undefined.  Arguments that do fit the expected
+// type is overwritten with the object in the prototype chain that
+// actually has that type.
 static inline Object* TypeCheck(Heap* heap,
                                 int argc,
                                 Object** argv,
@@ -1286,10 +1051,15 @@ static inline Object* TypeCheck(Heap* heap,
   SignatureInfo* sig = SignatureInfo::cast(sig_obj);
   // If necessary, check the receiver
   Object* recv_type = sig->receiver();
+
+  Object* holder = recv;
   if (!recv_type->IsUndefined()) {
-    holder = FindHidden(heap, holder, FunctionTemplateInfo::cast(recv_type));
-    if (holder == heap->null_value()) return heap->null_value();
+    for (; holder != heap->null_value(); holder = holder->GetPrototype()) {
+      if (holder->IsInstanceOf(FunctionTemplateInfo::cast(recv_type))) {
+        break;
+      }
+    }
+    if (holder == heap->null_value()) return holder;
   }
   Object* args_obj = sig->args();
   // If there is no argument signature we're done
@@ -1302,9 +1072,13 @@ static inline Object* TypeCheck(Heap* heap,
     if (argtype->IsUndefined()) continue;
     Object** arg = &argv[-1 - i];
     Object* current = *arg;
-    current = FindHidden(heap, current, FunctionTemplateInfo::cast(argtype));
-    if (current == heap->null_value()) current = heap->undefined_value();
-    *arg = current;
+    for (; current != heap->null_value(); current = current->GetPrototype()) {
+      if (current->IsInstanceOf(FunctionTemplateInfo::cast(argtype))) {
+        *arg = current;
+        break;
+      }
+    }
+    if (current == heap->null_value()) *arg = heap->undefined_value();
   }
   return holder;
 }
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index a2f752e..ca70ae5 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -38,25 +38,6 @@ enum BuiltinExtraArguments {
 };

-#define CODE_AGE_LIST_WITH_ARG(V, A)     \
-  V(Quadragenarian, A)                   \
-  V(Quinquagenarian, A)                  \
-  V(Sexagenarian, A)                     \
-  V(Septuagenarian, A)                   \
-  V(Octogenarian, A)
-
-#define CODE_AGE_LIST_IGNORE_ARG(X, V) V(X)
-
-#define CODE_AGE_LIST(V) \
-  CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
-
-#define DECLARE_CODE_AGE_BUILTIN(C, V)             \
-  V(Make##C##CodeYoungAgainOddMarking, BUILTIN,    \
-    UNINITIALIZED, Code::kNoExtraICState)          \
-  V(Make##C##CodeYoungAgainEvenMarking, BUILTIN,   \
-    UNINITIALIZED, Code::kNoExtraICState)
-
-
 // Define list of builtins implemented in C++.
 #define BUILTIN_LIST_C(V)                                           \
   V(Illegal, NO_EXTRA_ARGUMENTS)                                    \
@@ -214,8 +195,8 @@ enum BuiltinExtraArguments {
                                     Code::kNoExtraICState)              \
                                                                         \
   V(OnStackReplacement,             BUILTIN, UNINITIALIZED,             \
-                                    Code::kNoExtraICState)              \
-  CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
+                                    Code::kNoExtraICState)
+
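
BUILTIN_LIST_C and the CODE_AGE_LIST machinery being deleted here are both
instances of the classic "X macro" pattern: declare the list once, then
expand it many ways by passing different V macros. A self-contained
miniature of the same technique (names are mine):

    #include <cstdio>

    // One list, many expansions.
    #define COLOR_LIST(V) \
      V(Red)              \
      V(Green)            \
      V(Blue)

    #define DECLARE_ENUM(name) k##name,
    enum Color { COLOR_LIST(DECLARE_ENUM) kCount };
    #undef DECLARE_ENUM

    #define DECLARE_NAME(name) #name,
    static const char* kColorNames[] = { COLOR_LIST(DECLARE_NAME) };
    #undef DECLARE_NAME

    int main() {
      for (int i = 0; i < kCount; i++)
        std::printf("%d = %s\n", i, kColorNames[i]);
      return 0;
    }
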
 #ifdef ENABLE_DEBUGGER_SUPPORT
 // Define list of builtins used by the debugger implemented in assembly.
@@ -398,14 +379,6 @@ class Builtins {
   static void Generate_StringConstructCode(MacroAssembler* masm);
   static void Generate_OnStackReplacement(MacroAssembler* masm);

-#define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C)                \
-  static void Generate_Make##C##CodeYoungAgainEvenMarking(   \
-      MacroAssembler* masm);                                 \
-  static void Generate_Make##C##CodeYoungAgainOddMarking(    \
-      MacroAssembler* masm);
-  CODE_AGE_LIST(DECLARE_CODE_AGE_BUILTIN_GENERATOR)
-#undef DECLARE_CODE_AGE_BUILTIN_GENERATOR
-
   static void InitBuiltinFunctionTable();

   bool initialized_;
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 276c87e..7a72059 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -37,11 +37,11 @@ namespace v8 {
 namespace internal {

-bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) {
-  UnseededNumberDictionary* stubs = isolate->heap()->code_stubs();
-  int index = stubs->FindEntry(GetKey());
+bool CodeStub::FindCodeInCache(Code** code_out) {
+  Heap* heap = Isolate::Current()->heap();
+  int index = heap->code_stubs()->FindEntry(GetKey());
   if (index != UnseededNumberDictionary::kNotFound) {
-    *code_out = Code::cast(stubs->ValueAt(index));
+    *code_out = Code::cast(heap->code_stubs()->ValueAt(index));
     return true;
   }
   return false;
@@ -93,8 +93,8 @@ Handle<Code> CodeStub::GetCode() {
   Heap* heap = isolate->heap();
   Code* code;
   if (UseSpecialCache()
-      ? FindCodeInSpecialCache(&code, isolate)
-      : FindCodeInCache(&code, isolate)) {
+      ? FindCodeInSpecialCache(&code)
+      : FindCodeInCache(&code)) {
     ASSERT(IsPregenerated() == code->is_pregenerated());
     return Handle<Code>(code);
   }
@@ -169,122 +169,6 @@ void CodeStub::PrintName(StringStream* stream) {
 }

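
FindCodeInCache is a straight keyed-cache probe; the only thing the hunk
changes is where the isolate comes from (an explicit parameter versus the
Isolate::Current() thread-local). The shape of the lookup, reduced to
standard containers (uint32_t standing in for the stub key and void* for
Code*; all names here are mine):

    #include <cstdint>
    #include <unordered_map>

    struct StubCache {
      std::unordered_map<uint32_t, void*> table;

      // Probe the cache; on a hit, hand back the cached entry.
      bool FindCodeInCache(uint32_t key, void** code_out) const {
        std::unordered_map<uint32_t, void*>::const_iterator it =
            table.find(key);
        if (it == table.end()) return false;
        *code_out = it->second;
        return true;
      }
    };

    int main() {
      StubCache cache;
      int dummy;
      cache.table[42u] = &dummy;
      void* code = 0;
      return cache.FindCodeInCache(42u, &code) ? 0 : 1;
    }
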
-void BinaryOpStub::Generate(MacroAssembler* masm) {
-  // Explicitly allow generation of nested stubs. It is safe here because
-  // generation code does not use any raw pointers.
-  AllowStubCallsScope allow_stub_calls(masm, true);
-
-  BinaryOpIC::TypeInfo operands_type = Max(left_type_, right_type_);
-  if (left_type_ == BinaryOpIC::ODDBALL && right_type_ == BinaryOpIC::ODDBALL) {
-    // The OddballStub handles a number and an oddball, not two oddballs.
-    operands_type = BinaryOpIC::GENERIC;
-  }
-  switch (operands_type) {
-    case BinaryOpIC::UNINITIALIZED:
-      GenerateTypeTransition(masm);
-      break;
-    case BinaryOpIC::SMI:
-      GenerateSmiStub(masm);
-      break;
-    case BinaryOpIC::INT32:
-      GenerateInt32Stub(masm);
-      break;
-    case BinaryOpIC::HEAP_NUMBER:
-      GenerateHeapNumberStub(masm);
-      break;
-    case BinaryOpIC::ODDBALL:
-      GenerateOddballStub(masm);
-      break;
-    case BinaryOpIC::STRING:
-      GenerateStringStub(masm);
-      break;
-    case BinaryOpIC::GENERIC:
-      GenerateGeneric(masm);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
-  switch (op_) {
-    case Token::ADD:
-      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
-      break;
-    case Token::SUB:
-      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
-      break;
-    case Token::MUL:
-      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
-      break;
-    case Token::DIV:
-      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
-      break;
-    case Token::MOD:
-      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
-      break;
-    case Token::BIT_OR:
-      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
-      break;
-    case Token::BIT_AND:
-      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
-      break;
-    case Token::BIT_XOR:
-      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
-      break;
-    case Token::SAR:
-      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
-      break;
-    case Token::SHR:
-      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
-      break;
-    case Token::SHL:
-      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-#undef __
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
-  const char* op_name = Token::Name(op_);
-  const char* overwrite_name;
-  switch (mode_) {
-    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
-    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
-    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
-    default: overwrite_name = "UnknownOverwrite"; break;
-  }
-  stream->Add("BinaryOpStub_%s_%s_%s+%s",
-              op_name,
-              overwrite_name,
-              BinaryOpIC::GetName(left_type_),
-              BinaryOpIC::GetName(right_type_));
-}
-
-
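
The deleted BinaryOpStub::Generate dispatches on the more general of the two
recorded operand states, with one special case for oddball/oddball pairs.
That decision logic in isolation (the enumerators mirror the BinaryOpIC
names above, but this enum and its ordering are assumptions of the sketch):

    #include <algorithm>

    enum TypeInfo {
      UNINITIALIZED, SMI, INT32, HEAP_NUMBER, ODDBALL, STRING, GENERIC
    };

    // Pick the code path from the more general of the two operand types;
    // the oddball stub handles (number, oddball), not two oddballs.
    static TypeInfo OperandsType(TypeInfo left, TypeInfo right) {
      TypeInfo t = std::max(left, right);
      if (left == ODDBALL && right == ODDBALL) t = GENERIC;
      return t;
    }

    int main() {
      return OperandsType(SMI, HEAP_NUMBER) == HEAP_NUMBER ? 0 : 1;
    }
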
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
-  ASSERT(left_type_ == BinaryOpIC::STRING || right_type_ == BinaryOpIC::STRING);
-  ASSERT(op_ == Token::ADD);
-  if (left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING) {
-    GenerateBothStringStub(masm);
-    return;
-  }
-  // Try to add arguments as strings, otherwise, transition to the generic
-  // BinaryOpIC type.
-  GenerateAddStrings(masm);
-  GenerateTypeTransition(masm);
-}
-
-
 void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
   ASSERT(*known_map_ != NULL);
   Isolate* isolate = new_object->GetIsolate();
@@ -297,7 +181,8 @@ void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
 }

-bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
+bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
+  Isolate* isolate = known_map_->GetIsolate();
   Factory* factory = isolate->factory();
   Code::Flags flags = Code::ComputeFlags(
       static_cast<Code::Kind>(GetCodeKind()),
@@ -311,12 +196,7 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
                                               flags));
   if (probe->IsCode()) {
     *code_out = Code::cast(*probe);
-#ifdef DEBUG
-    Token::Value cached_op;
-    ICCompareStub::DecodeMinorKey((*code_out)->stub_info(), NULL, NULL, NULL,
-                                  &cached_op);
-    ASSERT(op_ == cached_op);
-#endif
+    ASSERT(op_ == (*code_out)->compare_operation() + Token::EQ);
     return true;
   }
   return false;
@@ -324,33 +204,7 @@
 int ICCompareStub::MinorKey() {
-  return OpField::encode(op_ - Token::EQ) |
-         LeftStateField::encode(left_) |
-         RightStateField::encode(right_) |
-         HandlerStateField::encode(state_);
-}
-
-
-void ICCompareStub::DecodeMinorKey(int minor_key,
-                                   CompareIC::State* left_state,
-                                   CompareIC::State* right_state,
-                                   CompareIC::State* handler_state,
-                                   Token::Value* op) {
-  if (left_state) {
-    *left_state =
-        static_cast<CompareIC::State>(LeftStateField::decode(minor_key));
-  }
-  if (right_state) {
-    *right_state =
-        static_cast<CompareIC::State>(RightStateField::decode(minor_key));
-  }
-  if (handler_state) {
-    *handler_state =
-        static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
-  }
-  if (op) {
-    *op = static_cast<Token::Value>(OpField::decode(minor_key) + Token::EQ);
-  }
+  return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
 }

@@ -359,28 +213,27 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
     case CompareIC::UNINITIALIZED:
       GenerateMiss(masm);
       break;
-    case CompareIC::SMI:
+    case CompareIC::SMIS:
       GenerateSmis(masm);
       break;
-    case CompareIC::HEAP_NUMBER:
+    case CompareIC::HEAP_NUMBERS:
       GenerateHeapNumbers(masm);
       break;
-    case CompareIC::STRING:
+    case CompareIC::STRINGS:
       GenerateStrings(masm);
       break;
-    case CompareIC::SYMBOL:
+    case CompareIC::SYMBOLS:
       GenerateSymbols(masm);
       break;
-    case CompareIC::OBJECT:
+    case CompareIC::OBJECTS:
       GenerateObjects(masm);
       break;
     case CompareIC::KNOWN_OBJECTS:
       ASSERT(*known_map_ != NULL);
       GenerateKnownObjects(masm);
       break;
-    case CompareIC::GENERIC:
-      GenerateGeneric(masm);
-      break;
+    default:
+      UNREACHABLE();
   }
 }
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index ae113f5..a843841 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -141,7 +141,7 @@ class CodeStub BASE_EMBEDDED {
   bool CompilingCallsToThisStubIsGCSafe() {
     bool is_pregenerated = IsPregenerated();
     Code* code = NULL;
-    CHECK(!is_pregenerated || FindCodeInCache(&code, Isolate::Current()));
+    CHECK(!is_pregenerated || FindCodeInCache(&code));
     return is_pregenerated;
   }
@@ -160,7 +160,7 @@ class CodeStub BASE_EMBEDDED {
   virtual bool SometimesSetsUpAFrame() { return true; }

   // Lookup the code in the (possibly custom) cache.
-  bool FindCodeInCache(Code** code_out, Isolate* isolate);
+  bool FindCodeInCache(Code** code_out);

 protected:
   static bool CanUseFPRegisters();
@@ -202,9 +202,7 @@ class CodeStub BASE_EMBEDDED {
   virtual void AddToSpecialCache(Handle<Code> new_object) { }

   // Find code in a specialized cache, work is delegated to the specific stub.
-  virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
-    return false;
-  }
+  virtual bool FindCodeInSpecialCache(Code** code_out) { return false; }

   // If a stub uses a special cache override this.
   virtual bool UseSpecialCache() { return false; }
@@ -484,132 +482,10 @@ class MathPowStub: public CodeStub {
 };

-class BinaryOpStub: public CodeStub {
- public:
-  BinaryOpStub(Token::Value op, OverwriteMode mode)
-      : op_(op),
-        mode_(mode),
-        platform_specific_bit_(false),
-        left_type_(BinaryOpIC::UNINITIALIZED),
-        right_type_(BinaryOpIC::UNINITIALIZED),
-        result_type_(BinaryOpIC::UNINITIALIZED) {
-    Initialize();
-    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
-  }
-
-  BinaryOpStub(
-      int key,
-      BinaryOpIC::TypeInfo left_type,
-      BinaryOpIC::TypeInfo right_type,
-      BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
-      : op_(OpBits::decode(key)),
-        mode_(ModeBits::decode(key)),
-        platform_specific_bit_(PlatformSpecificBits::decode(key)),
-        left_type_(left_type),
-        right_type_(right_type),
-        result_type_(result_type) { }
-
-  static void decode_types_from_minor_key(int minor_key,
-                                          BinaryOpIC::TypeInfo* left_type,
-                                          BinaryOpIC::TypeInfo* right_type,
-                                          BinaryOpIC::TypeInfo* result_type) {
-    *left_type =
-        static_cast<BinaryOpIC::TypeInfo>(LeftTypeBits::decode(minor_key));
-    *right_type =
-        static_cast<BinaryOpIC::TypeInfo>(RightTypeBits::decode(minor_key));
-    *result_type =
-        static_cast<BinaryOpIC::TypeInfo>(ResultTypeBits::decode(minor_key));
-  }
-
-  static Token::Value decode_op_from_minor_key(int minor_key) {
-    return static_cast<Token::Value>(OpBits::decode(minor_key));
-  }
-
-  enum SmiCodeGenerateHeapNumberResults {
-    ALLOW_HEAPNUMBER_RESULTS,
-    NO_HEAPNUMBER_RESULTS
-  };
-
- private:
-  Token::Value op_;
-  OverwriteMode mode_;
-  bool platform_specific_bit_;  // Indicates SSE3 on IA32, VFP2 on ARM.
-
-  // Operand type information determined at runtime.
-  BinaryOpIC::TypeInfo left_type_;
-  BinaryOpIC::TypeInfo right_type_;
-  BinaryOpIC::TypeInfo result_type_;
-
-  virtual void PrintName(StringStream* stream);
-
-  // Minor key encoding in 19 bits TTTRRRLLLSOOOOOOOMM.
-  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 7> {};
-  class PlatformSpecificBits: public BitField<bool, 9, 1> {};
-  class LeftTypeBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
-  class RightTypeBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
-  class ResultTypeBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
-
-  Major MajorKey() { return BinaryOp; }
-  int MinorKey() {
-    return OpBits::encode(op_)
-           | ModeBits::encode(mode_)
-           | PlatformSpecificBits::encode(platform_specific_bit_)
-           | LeftTypeBits::encode(left_type_)
-           | RightTypeBits::encode(right_type_)
-           | ResultTypeBits::encode(result_type_);
-  }
-
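
Every MinorKey in this file packs its fields with BitField, matching the
19-bit TTTRRRLLLSOOOOOOOMM layout documented just above. A simplified
single-header sketch of that template and its use (the real V8 BitField
carries extra static checks; the field names here are mine):

    #include <cassert>
    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    // Three fields of the 19-bit layout above: MM, OOOOOOO, S.
    typedef BitField<int, 0, 2> ModeBits;
    typedef BitField<int, 2, 7> OpBits;
    typedef BitField<bool, 9, 1> PlatformBit;

    int main() {
      uint32_t key = ModeBits::encode(2) | OpBits::encode(41) |
                     PlatformBit::encode(true);
      assert(ModeBits::decode(key) == 2 && OpBits::decode(key) == 41);
      return 0;
    }
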
-  // Platform-independent implementation.
-  void Generate(MacroAssembler* masm);
-  void GenerateCallRuntime(MacroAssembler* masm);
-
-  // Platform-independent signature, platform-specific implementation.
-  void Initialize();
-  void GenerateAddStrings(MacroAssembler* masm);
-  void GenerateBothStringStub(MacroAssembler* masm);
-  void GenerateGeneric(MacroAssembler* masm);
-  void GenerateGenericStub(MacroAssembler* masm);
-  void GenerateHeapNumberStub(MacroAssembler* masm);
-  void GenerateInt32Stub(MacroAssembler* masm);
-  void GenerateLoadArguments(MacroAssembler* masm);
-  void GenerateOddballStub(MacroAssembler* masm);
-  void GenerateRegisterArgsPush(MacroAssembler* masm);
-  void GenerateReturn(MacroAssembler* masm);
-  void GenerateSmiStub(MacroAssembler* masm);
-  void GenerateStringStub(MacroAssembler* masm);
-  void GenerateTypeTransition(MacroAssembler* masm);
-  void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-  void GenerateUninitializedStub(MacroAssembler* masm);
-
-  // Entirely platform-specific methods are defined as static helper
-  // functions in the <arch>/code-stubs-<arch>.cc files.
-
-  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
-  virtual InlineCacheState GetICState() {
-    return BinaryOpIC::ToState(Max(left_type_, right_type_));
-  }
-
-  virtual void FinishCode(Handle<Code> code) {
-    code->set_stub_info(MinorKey());
-  }
-
-  friend class CodeGenerator;
-};
-
-
 class ICCompareStub: public CodeStub {
  public:
-  ICCompareStub(Token::Value op,
-                CompareIC::State left,
-                CompareIC::State right,
-                CompareIC::State handler)
-      : op_(op),
-        left_(left),
-        right_(right),
-        state_(handler) {
+  ICCompareStub(Token::Value op, CompareIC::State state)
+      : op_(op), state_(state) {
     ASSERT(Token::IsCompareOp(op));
   }
@@ -617,24 +493,13 @@ class ICCompareStub: public CodeStub {
   void set_known_map(Handle<Map> map) { known_map_ = map; }

-  static void DecodeMinorKey(int minor_key,
-                             CompareIC::State* left_state,
-                             CompareIC::State* right_state,
-                             CompareIC::State* handler_state,
-                             Token::Value* op);
-
-  static CompareIC::State CompareState(int minor_key) {
-    return static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
-  }
-
 private:
   class OpField: public BitField<int, 0, 3> { };
-  class LeftStateField: public BitField<int, 3, 4> { };
-  class RightStateField: public BitField<int, 7, 4> { };
-  class HandlerStateField: public BitField<int, 11, 4> { };
+  class StateField: public BitField<int, 3, 5> { };

   virtual void FinishCode(Handle<Code> code) {
-    code->set_stub_info(MinorKey());
+    code->set_compare_state(state_);
+    code->set_compare_operation(op_ - Token::EQ);
   }

   virtual CodeStub::Major MajorKey() { return CompareIC; }
@@ -649,23 +514,117 @@ class ICCompareStub: public CodeStub {
   void GenerateObjects(MacroAssembler* masm);
   void GenerateMiss(MacroAssembler* masm);
   void GenerateKnownObjects(MacroAssembler* masm);
-  void GenerateGeneric(MacroAssembler* masm);

   bool strict() const { return op_ == Token::EQ_STRICT; }
   Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }

   virtual void AddToSpecialCache(Handle<Code> new_object);
-  virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate);
+  virtual bool FindCodeInSpecialCache(Code** code_out);
   virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECTS; }

   Token::Value op_;
-  CompareIC::State left_;
-  CompareIC::State right_;
   CompareIC::State state_;
   Handle<Map> known_map_;
 };

+// Flags that control the compare stub code generation.
+enum CompareFlags {
+  NO_COMPARE_FLAGS = 0,
+  NO_SMI_COMPARE_IN_STUB = 1 << 0,
+  NO_NUMBER_COMPARE_IN_STUB = 1 << 1,
+  CANT_BOTH_BE_NAN = 1 << 2
+};
+
+
+enum NaNInformation {
+  kBothCouldBeNaN,
+  kCantBothBeNaN
+};
+
+
+class CompareStub: public CodeStub {
+ public:
+  CompareStub(Condition cc,
+              bool strict,
+              CompareFlags flags,
+              Register lhs,
+              Register rhs) :
+     cc_(cc),
+     strict_(strict),
+     never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
+     include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
+     include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
+     lhs_(lhs),
+     rhs_(rhs) { }
+
+  CompareStub(Condition cc,
+              bool strict,
+              CompareFlags flags) :
+     cc_(cc),
+     strict_(strict),
+     never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
+     include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
+     include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
+     lhs_(no_reg),
+     rhs_(no_reg) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Condition cc_;
+  bool strict_;
+  // Only used for 'equal' comparisons.  Tells the stub that we already know
+  // that at least one side of the comparison is not NaN.  This allows the
+  // stub to use object identity in the positive case.  We ignore it when
+  // generating the minor key for other comparisons to avoid creating more
+  // stubs.
+  bool never_nan_nan_;
+  // Do generate the number comparison code in the stub. Stubs without number
+  // comparison code is used when the number comparison has been inlined, and
+  // the stub will be called if one of the operands is not a number.
+  bool include_number_compare_;
+
+  // Generate the comparison code for two smi operands in the stub.
+  bool include_smi_compare_;
+
+  // Register holding the left hand side of the comparison if the stub gives
+  // a choice, no_reg otherwise.
+
+  Register lhs_;
+  // Register holding the right hand side of the comparison if the stub gives
+  // a choice, no_reg otherwise.
+  Register rhs_;
+
+  // Encoding of the minor key in 16 bits.
+  class StrictField: public BitField<bool, 0, 1> {};
+  class NeverNanNanField: public BitField<bool, 1, 1> {};
+  class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
+  class IncludeSmiCompareField: public BitField<bool, 3, 1> {};
+  class RegisterField: public BitField<bool, 4, 1> {};
+  class ConditionField: public BitField<int, 5, 11> {};
+
+  Major MajorKey() { return Compare; }
+
+  int MinorKey();
+
+  virtual int GetCodeKind() { return Code::COMPARE_IC; }
+  virtual void FinishCode(Handle<Code> code) {
+    code->set_compare_state(CompareIC::GENERIC);
+  }
+
+  // Branch to the label if the given object isn't a symbol.
+  void BranchIfNonSymbol(MacroAssembler* masm,
+                         Label* label,
+                         Register object,
+                         Register scratch);
+
+  // Unfortunately you have to run without snapshots to see most of these
+  // names in the profile since most compare stubs end up in the snapshot.
+  virtual void PrintName(StringStream* stream);
+};
+
+
 class CEntryStub : public CodeStub {
  public:
   explicit CEntryStub(int result_size,
@@ -1094,9 +1053,6 @@ class ToBooleanStub: public CodeStub {
     bool IsEmpty() const { return set_.IsEmpty(); }
     bool Contains(Type type) const { return set_.Contains(type); }
-    bool ContainsAnyOf(Types types) const {
-      return set_.ContainsAnyOf(types.set_);
-    }
     void Add(Type type) { set_.Add(type); }
     byte ToByte() const { return set_.ToIntegral(); }
     void Print(StringStream* stream) const;
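
The reinstated CompareFlags enum above is consumed as a plain bitmask: each
CompareStub constructor tests one bit to decide which fast paths get
compiled into the stub. The same consumption pattern in isolation (helper
name is mine):

    enum CompareFlags {
      NO_COMPARE_FLAGS = 0,
      NO_SMI_COMPARE_IN_STUB = 1 << 0,
      NO_NUMBER_COMPARE_IN_STUB = 1 << 1,
      CANT_BOTH_BE_NAN = 1 << 2
    };

    // A clear bit means "compile this comparison into the stub".
    static bool IncludeSmiCompare(CompareFlags flags) {
      return (flags & NO_SMI_COMPARE_IN_STUB) == 0;
    }

    int main() {
      CompareFlags f = static_cast<CompareFlags>(NO_SMI_COMPARE_IN_STUB |
                                                 CANT_BOTH_BE_NAN);
      return IncludeSmiCompare(f) ? 1 : 0;  // 0: smi compare omitted
    }
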
@@ -1215,8 +1171,6 @@ class ProfileEntryHookStub : public CodeStub {
   // non-NULL hook.
   static bool SetFunctionEntryHook(FunctionEntryHook entry_hook);

-  static bool HasEntryHook() { return entry_hook_ != NULL; }
-
 private:
   static void EntryHookTrampoline(intptr_t function, intptr_t stack_pointer);
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 83ac854..0163580 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -107,7 +107,6 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
   if (!code.is_null()) {
     isolate->counters()->total_compiled_code_size()->Increment(
         code->instruction_size());
-    code->set_prologue_offset(info->prologue_offset());
   }
   return code;
 }
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 0ac68c2..08a777f 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -90,7 +90,6 @@ namespace internal {
 typedef double (*UnaryMathFunction)(double x);

 UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type);
-UnaryMathFunction CreateExpFunction();
 UnaryMathFunction CreateSqrtFunction();

@@ -104,19 +103,6 @@ class ElementsTransitionGenerator : public AllStatic {
   DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
 };

-
-class SeqStringSetCharGenerator : public AllStatic {
- public:
-  static void Generate(MacroAssembler* masm,
-                       String::Encoding encoding,
-                       Register string,
-                       Register index,
-                       Register value);
- private:
-  DISALLOW_COPY_AND_ASSIGN(SeqStringSetCharGenerator);
-};
-
-
 } }  // namespace v8::internal

 #endif  // V8_CODEGEN_H_
diff --git a/deps/v8/src/collection.js b/deps/v8/src/collection.js
index b3c2db7..d36fe18 100644
--- a/deps/v8/src/collection.js
+++ b/deps/v8/src/collection.js
@@ -88,25 +88,6 @@ function SetDelete(key) {
 }

-function SetGetSize() {
-  if (!IS_SET(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['Set.prototype.size', this]);
-  }
-  return %SetGetSize(this);
-}
-
-
-function SetClear() {
-  if (!IS_SET(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['Set.prototype.clear', this]);
-  }
-  // Replace the internal table with a new empty table.
-  %SetInitialize(this);
-}
-
-
 function MapConstructor() {
   if (%_IsConstructCall()) {
     %MapInitialize(this);
@@ -164,25 +145,6 @@ function MapDelete(key) {
 }

-function MapGetSize() {
-  if (!IS_MAP(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['Map.prototype.size', this]);
-  }
-  return %MapGetSize(this);
-}
-
-
-function MapClear() {
-  if (!IS_MAP(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['Map.prototype.clear', this]);
-  }
-  // Replace the internal table with a new empty table.
-  %MapInitialize(this);
-}
-
-
 function WeakMapConstructor() {
   if (%_IsConstructCall()) {
     %WeakMapInitialize(this);
@@ -253,22 +215,18 @@ function WeakMapDelete(key) {
   %SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);

   // Set up the non-enumerable functions on the Set prototype object.
-  InstallGetter($Set.prototype, "size", SetGetSize);
   InstallFunctions($Set.prototype, DONT_ENUM, $Array(
     "add", SetAdd,
     "has", SetHas,
-    "delete", SetDelete,
-    "clear", SetClear
+    "delete", SetDelete
   ));

   // Set up the non-enumerable functions on the Map prototype object.
-  InstallGetter($Map.prototype, "size", MapGetSize);
   InstallFunctions($Map.prototype, DONT_ENUM, $Array(
     "get", MapGet,
     "set", MapSet,
     "has", MapHas,
-    "delete", MapDelete,
-    "clear", MapClear
+    "delete", MapDelete
   ));

   // Set up the WeakMap constructor function.
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 904e84f..c064576 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -98,7 +98,7 @@ void CompilationSubCache::Age() {

 void CompilationSubCache::IterateFunctions(ObjectVisitor* v) {
-  Object* undefined = isolate()->heap()->undefined_value();
+  Object* undefined = isolate()->heap()->raw_unchecked_undefined_value();
   for (int i = 0; i < generations_; i++) {
     if (tables_[i] != undefined) {
       reinterpret_cast<CompilationCacheTable*>(tables_[i])->IterateElements(v);
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 7e4eaa2..8637437 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -52,53 +52,57 @@ namespace internal {

 CompilationInfo::CompilationInfo(Handle<Script> script)

-Chrome V8 profiling log processor
-
-Process V8's profiling information log (sampling profiler tick information)
-in your browser. Particularly useful if you don't have the V8 shell (d8)
-at hand on your system. You still have to run Chrome with the appropriate
-command line flags
-to produce the profiling log.
-
-Usage:
-
-Click on the button and browse to the profiling log file (usually, v8.log).
-Process will start automatically and the output will be visible in the below
-text area.
-
-Limitations and disclaimer:
-
-This page offers a subset of the functionalities of the command-line tick
-processor utility in the V8 repository. In particular, this page cannot
-access the command-line utility that provides library symbol information,
-hence the [C++] section of the output stays empty. Also consider that this
-web-based tool is provided only for convenience and quick reference, you
-should refer to the command-line
-version for full output.
-
-Copyright the V8 Authors - Last change to this page: 12/12/2012
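
For readers who want the gist of what either tick processor does with
v8.log: split comma-separated records and aggregate the 'tick' lines. A
rough standalone tally by VM state; the field order is assumed from the
3.14-era parser registration in the tickprocessor.js diff below (pc, sp,
is_external_callback, tos_or_external_callback, vmState, stack...), so
treat this as illustrative, not as a log-format reference:

    #include <cstdio>
    #include <cstdlib>
    #include <iostream>
    #include <map>
    #include <sstream>
    #include <string>

    int main() {
      std::map<int, int> ticks_by_state;
      std::string line;
      while (std::getline(std::cin, line)) {
        std::istringstream fields(line);
        std::string tag, pc, sp, ext_cb, tos, state;
        std::getline(fields, tag, ',');
        if (tag != "tick") continue;  // only tally sampler ticks
        std::getline(fields, pc, ',');
        std::getline(fields, sp, ',');
        std::getline(fields, ext_cb, ',');
        std::getline(fields, tos, ',');
        std::getline(fields, state, ',');
        ticks_by_state[std::atoi(state.c_str())]++;
      }
      for (std::map<int, int>::iterator it = ticks_by_state.begin();
           it != ticks_by_state.end(); ++it)
        std::printf("state %d: %d ticks\n", it->first, it->second);
      return 0;
    }
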

diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
index 7530c6b..4c4886d 100644
--- a/deps/v8/tools/tickprocessor.js
+++ b/deps/v8/tools/tickprocessor.js
@@ -73,7 +73,7 @@ function parseState(s) {
 function SnapshotLogProcessor() {
   LogReader.call(this, {
     'code-creation': {
-        parsers: [null, parseInt, parseInt, parseInt, null, 'var-args'],
+        parsers: [null, parseInt, parseInt, null, 'var-args'],
         processor: this.processCodeCreation },
     'code-move': { parsers: [parseInt, parseInt],
         processor: this.processCodeMove },
@@ -107,7 +107,7 @@ inherits(SnapshotLogProcessor, LogReader);

 SnapshotLogProcessor.prototype.processCodeCreation = function(
-    type, kind, start, size, name, maybe_func) {
+    type, start, size, name, maybe_func) {
   if (maybe_func.length) {
     var funcAddr = parseInt(maybe_func[0]);
     var state = parseState(maybe_func[1]);
@@ -156,7 +156,7 @@ function TickProcessor(
     'shared-library': { parsers: [null, parseInt, parseInt],
         processor: this.processSharedLibrary },
     'code-creation': {
-        parsers: [null, parseInt, parseInt, parseInt, null, 'var-args'],
+        parsers: [null, parseInt, parseInt, null, 'var-args'],
         processor: this.processCodeCreation },
     'code-move': { parsers: [parseInt, parseInt],
         processor: this.processCodeMove },
@@ -167,7 +167,7 @@ function TickProcessor(
     'snapshot-pos': { parsers: [parseInt, parseInt],
         processor: this.processSnapshotPosition },
     'tick': {
-        parsers: [parseInt, parseInt, parseInt, parseInt,
+        parsers: [parseInt, parseInt, parseInt, parseInt, parseInt,
                   'var-args'],
         processor: this.processTick },
     'heap-sample-begin': { parsers: [null, null, parseInt],
@@ -231,9 +231,8 @@ TickProcessor.VmStates = {
   JS: 0,
   GC: 1,
   COMPILER: 2,
-  PARALLEL_COMPILER: 3,
-  OTHER: 4,
-  EXTERNAL: 5
+  OTHER: 3,
+  EXTERNAL: 4
 };

@@ -309,7 +308,7 @@ TickProcessor.prototype.processSharedLibrary = function(

 TickProcessor.prototype.processCodeCreation = function(
-    type, kind, start, size, name, maybe_func) {
+    type, start, size, name, maybe_func) {
   name = this.deserializedEntriesNames_[start] || name;
   if (maybe_func.length) {
     var funcAddr = parseInt(maybe_func[0]);
@@ -350,7 +349,6 @@ TickProcessor.prototype.includeTick = function(vmState) {

 TickProcessor.prototype.processTick = function(pc,
                                                sp,
-                                               ns_since_start,
                                                is_external_callback,
                                                tos_or_external_callback,
                                                vmState,
-- 
2.7.4